licenses | version | tree_hash | path | type | size | text | package_name | repo
---|---|---|---|---|---|---|---|---
["MIT"] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | code | 6431 | using Pkg
Pkg.activate(@__DIR__)
using SigmaRidgeRegression
using Plots
using StatsBase
using Statistics
using LaTeXStrings
using Random
#using ColorSchemes
# grp = GroupedFeatures(num_groups=2,group_size=200)
# To add to tests.jl
# SigmaRidgeRegression.fixed_point_function(hs, γs, [1.0; Inf])
# SigmaRidgeRegression.risk_formula(hs, γs, αs, [1.0; 10000])
_linestyles = [ :dot :dashdot :dash]
_main_cols = [:grey :purple :green]
id_design = BlockCovarianceDesign([IdentityCovarianceDesign(), IdentityCovarianceDesign()])
function theoretical_and_realized_mse(γs, αs, design::BlockCovarianceDesign; n = 400, nreps = 50, ntest = 20_000)
grp = GroupedFeatures(round.(Int, γs .* n))
design = set_groups(design, grp)
#design = IdentityCovarianceDesign(grp.p)
#hs = [spectrum(design);spectrum(design)]
hs = spectrum.(design.blocks)
λs1 = SigmaRidgeRegression.optimal_ignore_second_group_λs(γs, αs)
λs2 = SigmaRidgeRegression.optimal_single_λ(γs, αs)
λs3 = SigmaRidgeRegression.optimal_λs(γs, αs)
all_λs = (λs1, λs2, λs3)
opt_risk_theory = Matrix{Float64}(undef, 1, length(all_λs))
risk_empirical = Matrix{Float64}(undef, nreps, length(all_λs))
for (i, λs) in enumerate(all_λs)
opt_risk_theory[1, i] = SigmaRidgeRegression.risk_formula(hs, γs, αs, λs)
end
for j = 1:nreps
ridge_sim = GroupRidgeSimulationSettings(
grp = grp,
ntrain = n,
ntest = ntest,
Σ = design,
response_model = RandomLinearResponseModel(αs = αs, grp = grp),
)
sim_res = simulate(ridge_sim)
for (i, λs) in enumerate(all_λs)
risk_empirical[j, i] = mse_ridge(
StatsBase.fit(
MultiGroupRidgeRegressor(grp, λs),
sim_res.X_train,
sim_res.Y_train,
grp,
),
sim_res.X_test,
sim_res.Y_test,
)
end
end
risk_empirical = mean(risk_empirical; dims = 1)
(theoretical = opt_risk_theory, empirical = risk_empirical, all_λs = all_λs)
end
function oracle_risk_plot(
γs,
sum_alpha_squared;
design = id_design,
ylim = (0, 2.5),
n = 1000,
title = nothing,
legend = nothing,
kwargs...,
)
ratio_squared = range(0.0, 1.0, length = 30)
αs_squared = ratio_squared .* sum_alpha_squared
bs_squared = reverse(ratio_squared) .* sum_alpha_squared
risks = [
theoretical_and_realized_mse(
γs,
sqrt.([αs_squared[i]; bs_squared[i]]),
design;
n = n,
kwargs...,
) for i = 1:length(ratio_squared)
]
theoretical_risks = vcat(map(r -> r.theoretical, risks)...) .- 1 # subtract the noise variance σ² = 1
empirical_risks = vcat(map(r -> r.empirical, risks)...) .- 1
labels =
[L"$\;$Optimal $\blambda = (\lambda, \infty)$" L"$\;$Optimal $\blambda = (\lambda, \lambda)$" L"$\;$Optimal $\blambda = (\lambda_1, \lambda_2)$"]
#colors = reshape(colorschemes[:seaborn_deep6][1:3], 1, 3)
#colors = [:red :blue :purple]
ylabel = L"$\risk{\blambda}- \sigma^2$"
xlabel = L"\alpha_1^2/(\alpha_1^2 + \alpha_2^2)"
pl = plot(
ratio_squared,
theoretical_risks,
color = _main_cols,
linestyle = _linestyles,
ylim = ylim,
xguide = xlabel,
yguide = ylabel,
legend = legend,
label = labels,
background_color_legend = :transparent,
foreground_color_legend = :transparent,
grid = false,
title = title,
frame = :box,
plot_titlefontsize = 0.5,
thickness_scaling = 2.2,
legendfontsize = 12,
size = (650, 500),
)
plot!(
pl,
ratio_squared,
empirical_risks,
seriestype = :scatter,
color = _main_cols,
markershape = :utriangle,
markerstrokealpha = 0.0,
markersize = 4,
label = nothing,
)
pl
end
Random.seed!(10)
pgfplotsx()
using PGFPlotsX
push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\usepackage{bm}")
push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\newcommand{\blambda}{\bm{\lambda}}")
push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\newcommand{\risk}[1]{\bm{R}(#1)}")
nreps = 1
title_curve_1 = L"\gamma_1 = \gamma_2 = \frac{1}{4},\;\; \alpha_1^2 + \alpha_2^2 = 1"
curve_1 = oracle_risk_plot(
[0.25, 0.25],
1.0,
legend = :topleft,
nreps = nreps,
title = title_curve_1,
)
plot!(curve_1, tex_output_standalone = true)
savefig(curve_1, "oracle_risk1.tikz")
function generate_risk_plots(base_plot_name; nreps=nreps, kwargs...)
title_curve_1 = L"\gamma_1 = \gamma_2 = \frac{1}{4},\;\; \alpha_1^2 + \alpha_2^2 = 1"
curve_1 = oracle_risk_plot([0.25, 0.25], 1.0, legend = :topleft, nreps = nreps, title = title_curve_1; kwargs...)
title_curve_2 = L"\gamma_1 = \frac{1}{10},\; \gamma_2 = \frac{4}{10},\;\; \alpha_1^2 + \alpha_2^2 = 1"
curve_2 = oracle_risk_plot([0.1, 0.4], 1.0, legend = nothing, nreps = nreps, title=title_curve_2; kwargs...)
title_curve_3 = L"\gamma_1 = \gamma_2 = 1,\;\; \alpha_1^2 + \alpha_2^2 = 1"
curve_3 = oracle_risk_plot([1.0, 1.0], 1.0, legend = nothing, nreps = nreps, title=title_curve_3; kwargs...)
title_curve_4 = L"\gamma_1 = \gamma_2 = \frac{1}{4},\;\; \alpha_1^2 + \alpha_2^2 = 2"
curve_4 = oracle_risk_plot([0.25, 0.25], 2.0, legend = nothing, nreps = nreps, title=title_curve_4; kwargs...)
title_curve_5 = L"\gamma_1 = \frac{1}{10},\; \gamma_2 = \frac{4}{10},\;\; \alpha_1^2 + \alpha_2^2 = 2"
curve_5 = oracle_risk_plot([0.1, 0.4], 2.0, legend = nothing, nreps = nreps, title=title_curve_5; kwargs...)
title_curve_6 = L"\gamma_1 = \gamma_2 = 1,\;\; \alpha_1^2 + \alpha_2^2 = 2"
curve_6 = oracle_risk_plot([1.0, 1.0], 2.0, legend = nothing, nreps = nreps, title=title_curve_6; kwargs...)
for (i, c) in enumerate([curve_1, curve_2, curve_3, curve_4, curve_5, curve_6])
savefig(c, "$(base_plot_name)$i.tikz")
end
end
generate_risk_plots("oracle_risk")
#ar1_block_design = BlockCovarianceDesign([AR1Design(ρ=0.95), AR1Design(ρ=0.95)])
exponential_design = BlockCovarianceDesign([ExponentialOrderStatsCovarianceDesign(rate=0.5),
ExponentialOrderStatsCovarianceDesign(rate=0.5)])
generate_risk_plots("exponential_covariance/oracle_risk"; design=exponential_design, ylim=(0,6))
| SigmaRidgeRegression | https://github.com/nignatiadis/SigmaRidgeRegression.jl.git |
["MIT"] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | code | 3932 | using Pkg
Pkg.activate(@__DIR__)
using SigmaRidgeRegression
using StatsBase
using Statistics
using Random
using MLJ
using Distributions
using DrWatson
using JLD2
opt_no = parse(Int64, ARGS[1])
@show opt_no
# helper functions
function _merge(grp::GroupedFeatures; groups_goal = 2)
ngroups = length(grp.ps)
new_ps = Vector{Int64}(undef, groups_goal)
mod(ngroups, groups_goal) == 0 || throw(ArgumentError("Only implemented when groups_goal divides ngroups"))
step_length = div(ngroups, groups_goal)
cnt = 1
for i=1:groups_goal
cnt_upper = cnt + (step_length - 1)
new_ps[i] = sum(grp.ps[cnt:cnt_upper])
cnt = cnt_upper + 1
end
GroupedFeatures(new_ps)
end
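# Illustrative example of `_merge` (hypothetical sizes, not used below): four
# groups of 25 features merged into two groups of 50 each.
#   grp4 = GroupedFeatures(fill(25, 4))
#   _merge(grp4; groups_goal = 2).ps == [50, 50]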
function single_simulation(sim; Ks=Ks, save=true)
res = []
sim_name = randstring(16)
groups = sim.groups
_simulated_model = simulate(sim)
X = MLJ.matrix(_simulated_model.X)
Y = _simulated_model.Y
resampling_idx = _simulated_model.resampling_idx
bayes_λs = groups.ps .* var(sim.response_noise) ./ abs2.(sim.response_model.αs) ./ sim.ntrain
bayes_ridge = MultiGroupRidgeRegressor(groups, bayes_λs; center=false, scale=false)
_mach = machine(bayes_ridge, X, Y)
_eval = evaluate!(_mach, resampling=resampling_idx, measure=l2)
mse_bayes = _eval.measurement[1]
for K in Ks
newgroups = _merge(groups; groups_goal = K)
single_ridge = SingleGroupRidgeRegressor(groups=newgroups, λ=1.0, center=false, scale=false)
loo_single_ridge = LooRidgeRegressor(ridge = deepcopy(single_ridge))
sigma_ridge = SigmaRidgeRegressor(groups=newgroups, σ=0.01, center=false, scale=false)
loo_sigmaridge = LooRidgeRegressor(ridge=deepcopy(sigma_ridge), tuning=SigmaRidgeRegression.DefaultTuning(scale=:linear, param_min_ratio=0.001))
multi_ridge = MultiGroupRidgeRegressor(newgroups; center=false, scale=false)
loo_multi_ridge = LooRidgeRegressor(ridge = deepcopy(multi_ridge), rng=MersenneTwister(1))
glasso = GroupLassoRegressor(groups=newgroups, center=false, scale=false)
holdout_glasso = TunedRidgeRegressor(ridge=deepcopy(glasso), resampling= Holdout(shuffle=true, rng=1), tuning=DefaultTuning(param_min_ratio=1e-5))
models = [loo_sigmaridge, loo_single_ridge, loo_multi_ridge, holdout_glasso]
tmp_mses = fill(Inf, length(models))
for (model_idx, model) in enumerate(models)
_mach = machine(model, X, Y)
_eval = evaluate!(_mach, resampling=resampling_idx, measure=l2)
tmp_mses[model_idx] = _eval.measurement[1]
end
push!(res,
(mse_sigma = tmp_mses[1],
mse_single = tmp_mses[2],
mse_multi= tmp_mses[3],
mse_glasso = tmp_mses[4],
mse_bayes = mse_bayes,
K=K,
sim=sim,
p = sim.groups.p,
cov = sim.Σ,
response = sim.response_model,
sim_name = sim_name)
)
end
if save
@save "simulation_results/$(sim_name).jld2" res
end
res
end
# Code that starts simulations
# Simulation settings: vary the number of merged groups K, with n ∈ {p/2, p, 2p},
# comparing informative vs. uninformative response models.
Ks = 2 .^ (1:5)
p = 32*25
ns = Int.([p/2; p; 2p])
groups = GroupedFeatures(fill(25, 32))
ar1 = SigmaRidgeRegression.AR1Design(p, 0.8)
id = IdentityCovarianceDesign(p)
#uninformative_response_model = RandomLinearResponseModel(αs = fill(1.0,32), grp = groups)
informative_response_model = RandomLinearResponseModel(αs = (0:31)./3.1, grp = groups)
all_opts = dict_list(Dict(:n => ns, :cov => [ar1, id]))
opt = all_opts[opt_no]
n = opt[:n]
@show n
Σ = opt[:cov]
@show Σ
nreps = 300
sim = GroupRidgeSimulationSettings(groups=groups, Σ=Σ, response_noise = Normal(0,5), response_model=informative_response_model, ntrain = n)
for i in Base.OneTo(nreps)
@show i
single_simulation(sim)
end
| SigmaRidgeRegression | https://github.com/nignatiadis/SigmaRidgeRegression.jl.git |
["MIT"] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | code | 3499 | using Pkg
Pkg.activate(@__DIR__)
using FileIO
using DataFrames
using SigmaRidgeRegression
using Distributions
using LaTeXStrings
using Plots
using StatsPlots
using PGFPlotsX
pgfplotsx()
push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\usepackage{amsmath}")
push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\usepackage{amssymb}")
push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\usepackage{bm}")
push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\newcommand{\risk}[1]{\bm{R}(#1)}")
push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\usepackage[bbgreekl]{mathbbol}")
push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\newcommand{\sigmacv}{\bbsigma}")
push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\newcommand{\bSigma}{\bm{\Sigma}}")
push!(PGFPlotsX.CUSTOM_PREAMBLE, raw"\newcommand{\bw}{\bm{w}}")
# Load each result file individually:
# all_files = load.(joinpath.("simulation_results", readdir("simulation_results")))
# or use the consolidated file:
all_files = load("simulation_results/loaded_files.jld2")["loaded_files"]
all_tuples = vcat([x["res"] for x in all_files]...)
df = DataFrame(all_tuples)
df.n = getfield.(df.sim, :ntrain)
summary_f = x -> median(x) - 25 # 25 = response noise variance (noise is Normal(0, 5))
gdf = groupby(df, [:cov,:n ,:K]) |>
df -> combine(df, :mse_sigma => summary_f => :mse_sigma,
:mse_single => summary_f => :mse_single,
:mse_multi => summary_f => :mse_multi,
:mse_glasso => summary_f => :mse_glasso,
:mse_bayes => summary_f => :mse_bayes,
nrow) |>
df -> groupby(df, [:n,:cov])
f_tbl_norm(i) = [gdf[i].mse_sigma gdf[i].mse_single gdf[i].mse_multi gdf[i].mse_glasso] ./ gdf[i].mse_bayes
f_tbl(i) = [gdf[i].mse_single gdf[i].mse_glasso gdf[i].mse_multi gdf[i].mse_sigma gdf[i].mse_bayes]
_cols = [:steelblue :green :orange :purple :grey]
_markers = [:utriangle :dtriangle :diamond :pentagon :circle]
_labels = ["Single Ridge" "Group Lasso" "Multi Ridge" L"\sigmacv\textrm{-Ridge}" "Bayes"]
_linestyles =[:dot :dashdot :dashdotdot :solid :dash]
plot_params = (frame = :box, grid=nothing,
color = _cols,
background_color_legend = :transparent,
foreground_color_legend = :transparent,
thickness_scaling = 2.3,
markershape = _markers,
ylabel = L"\risk{\widehat{\bw}} - \sigma^2",
linestyle = _linestyles,
xlabel=L"K",
xscale = :log2,
markeralpha=0.6,
size= (550, 440))
pl1= plot(gdf[1].K, f_tbl(1); label=_labels, legend=:topleft, title=L"\bSigma=\textrm{AR}(0.8),\;n=p/2", ylim=(0,650), plot_params...)
pl2= plot(gdf[2].K, f_tbl(2); label=nothing, title=L"\bSigma=\textrm{AR}(0.8),\;n=p", ylim=(0,220), plot_params...)
pl3= plot(gdf[3].K, f_tbl(3); label=nothing, title=L"\bSigma=\textrm{AR}(0.8),\;n=2p", ylim=(0,45), plot_params...)
pl4= plot(gdf[4].K, f_tbl(4); label=nothing, title=L"\bSigma=I,\; n=p/2", legend=:topleft, ylim=(0,650), plot_params... )
pl5= plot(gdf[5].K, f_tbl(5); label=nothing, title=L"\bSigma=I,\; n=p", ylim=(0,220), plot_params... )
pl6= plot(gdf[6].K, f_tbl(6); label=nothing, title=L"\bSigma=I,\; n=2p", ylim=(0,45), plot_params... )
savefig(pl1, "simulations_ar_phalf.tikz")
savefig(pl2, "simulations_ar_p.tikz")
savefig(pl3, "simulations_ar_ptwice.tikz")
savefig(pl4, "simulations_id_phalf.tikz")
savefig(pl5, "simulations_id_p.tikz")
savefig(pl6, "simulations_id_ptwice.tikz")
pl_ar= plot(pl1,pl2,pl3, size=(1150,280), layout=(1,3))
savefig(pl_ar, "simulations_ar.tikz")
pl_id= plot(pl4,pl5,pl6, size=(1150,280), layout=(1,3))
pl = plot(pl4,pl5,pl6,pl1,pl2,pl3, size=(1200,800), layout=(2,3))
savefig(pl_id, "simulations_identity.tikz")
| SigmaRidgeRegression | https://github.com/nignatiadis/SigmaRidgeRegression.jl.git |
["MIT"] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | code | 1413 | # This file provides reproducible code for the extraction of SigmaRidgeRegression.CLLData
# from the R/Bioconductor MOFA package.
# An R installation (it will be called through `RCall`) is required,
# with the `MOFAdata` package installed.
# This package may be installed from within `R` as follows:
# ```r
# if (!requireNamespace("BiocManager", quietly = TRUE))
# install.packages("BiocManager")
#BiocManager::install("MOFAdata")
#```
using JLD2
using RCall
R"""
data("CLL_data", package="MOFAdata")
# use methylation data, gene expression data and drug responses as predictors
CLL_data <- CLL_data[1:3]
CLL_data <- lapply(CLL_data,t)
ngr <- sapply(CLL_data,ncol)
CLL_data <- Reduce(cbind, CLL_data)
#only include patient samples profiles in all three omics
CLL_data2 <- CLL_data[apply(CLL_data,1, function(p) !any(is.na(p))),]
dim(CLL_data2)
# prepare design matrix and response
X <- CLL_data2[,!grepl("D_002", colnames(CLL_data))]
y <- rowMeans(CLL_data2[,grepl("D_002", colnames(CLL_data))])
annot <- rep(1:3, times = ngr-c(5,0,0)) # group annotations to drugs, meth and RNA
ngr_prime <- ngr-c(5,0,0)
"""
# run with seed from Velten & Huber
R"""
set.seed(9876)
foldid <- sample(rep(seq(10), length=nrow(X)))
"""
@rget foldid
@rget X
@rget y
@rget ngr_prime
cll_data = (X = X, y = y, ngr = Int.(ngr_prime), foldid = foldid)
JLD2.@save "cll_data.jld2" {compress=true} cll_data
| SigmaRidgeRegression | https://github.com/nignatiadis/SigmaRidgeRegression.jl.git |
["MIT"] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | code | 2416 | module SigmaRidgeRegression
using BlockDiagonals
using Distributions
using Expectations
using FillArrays #not used yet
using FiniteDifferences
using LinearAlgebra
import MLJModelInterface
const MMI = MLJModelInterface
import MLJ
import MLJTuning
using MutableNamedTuples
using Random
using Roots
using Setfield
using StatsBase
using Tables
using UnPack
using WoodburyMatrices
import Base.\
import Base: reduce, rand
import LinearAlgebra: ldiv!
import StatsBase: fit!, fit, coef, islinear, leverage, modelmatrix, response, predict
import WoodburyMatrices: _ldiv!
#---------- piracy ---------------------------------------------------------
MMI.nrows(X::Tables.MatrixTable) = size(MMI.matrix(X), 1)
MMI.selectrows(X::Tables.MatrixTable, ::Colon) = X
MMI.selectrows(X::Tables.MatrixTable, r::Integer) =
MMI.selectrows(X::Tables.MatrixTable, r:r)
function MMI.selectrows(X::Tables.MatrixTable, r)
new_matrix = MMI.matrix(X)[r, :]
_names = getfield(X, :names)
MMI.table(new_matrix; names = _names)
end
#----------------------------------------------------------------------------
include("nnls.jl")
include("utils.jl")
include("groupedfeatures.jl")
include("blockridge.jl")
include("end_to_end.jl")
include("covariance_design.jl")
include("simulations.jl")
include("theoretical_risk_curves.jl")
include("mmi.jl")
include("mmi_sigmaridge.jl")
include("grouplasso.jl")
include("datasets/CLLData/CLLData.jl")
export GroupedFeatures,
ngroups,
group_idx,
group_summary,
group_expand,
random_betas,
CholeskyRidgePredictor,
WoodburyRidgePredictor,
BasicGroupRidgeWorkspace,
MomentTunerSetup,
get_αs_squared,
get_λs,
loo_error,
mse_ridge,
σ_squared_max,
sigma_squared_path,
CovarianceDesign,
nfeatures,
get_Σ,
spectrum,
simulate_rotated_design,
AR1Design,
set_groups,
DiagonalCovarianceDesign,
IdentityCovarianceDesign,
UniformScalingCovarianceDesign,
ExponentialOrderStatsCovarianceDesign,
BlockCovarianceDesign,
simulate,
GroupRidgeSimulationSettings,
RandomLinearResponseModel,
optimal_risk,
optimal_single_λ_risk,
optimal_ignore_second_group_risk,
SingleGroupRidgeRegressor,
MultiGroupRidgeRegressor,
LooRidgeRegressor,
TunedRidgeRegressor,
SigmaRidgeRegressor,
GroupLassoRegressor,
DefaultTuning,
LooSigmaRidgeRegressor
end # module
| SigmaRidgeRegression | https://github.com/nignatiadis/SigmaRidgeRegression.jl.git |
["MIT"] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | code | 7734 | """
`AbstractRidgePredictor` is supposed to implement the interface
* `update_λs!`
* `trace_XtX`
* `XtXpΛ_ldiv_XtX`
* `LinearAlgebra.ldiv!`
* `Base.\`
Concrete subtypes available are `CholeskyRidgePredictor` and
`WoodburyRidgePredictor`.
"""
abstract type AbstractRidgePredictor end
"""
Used typically for p < n.
"""
struct CholeskyRidgePredictor{M<:AbstractMatrix,SYM<:Symmetric,C<:Cholesky} <:
AbstractRidgePredictor
X::M
XtX::SYM
XtXpΛ::SYM
XtXpΛ_chol::C
end
function CholeskyRidgePredictor(X)
(n, p) = size(X)
XtX = Symmetric(X' * X ./ n)
XtXpΛ = XtX + 1.0 * I
XtXpΛ_chol = cholesky!(XtXpΛ)
CholeskyRidgePredictor(X, XtX, XtXpΛ, XtXpΛ_chol)
end
function update_λs!(chol::CholeskyRidgePredictor, groups, λs)
chol.XtXpΛ .= Symmetric(chol.XtX + Diagonal(group_expand(groups, λs)))
# cholesky! refactorizes XtXpΛ in place; XtXpΛ_chol.factors aliases the same
# array, so the stored factorization is updated as well.
cholesky!(chol.XtXpΛ)
end
function ldiv!(A, chol::CholeskyRidgePredictor, B)
ldiv!(A, chol.XtXpΛ_chol, B)
end
function \(chol::CholeskyRidgePredictor, B)
chol.XtXpΛ_chol \ B
end
function XtXpΛ_ldiv_XtX(chol::CholeskyRidgePredictor)
chol.XtXpΛ_chol \ chol.XtX
end
function trace_XtX(chol::CholeskyRidgePredictor)
tr(chol.XtX)
end
"""
Used typically for p >> n and n reasonably small
"""
struct WoodburyRidgePredictor{M<:AbstractMatrix,S<:SymWoodbury} <:
SigmaRidgeRegression.AbstractRidgePredictor
X::M
wdb::S
end
function WoodburyRidgePredictor(X)
(n, p) = size(X)
wdb = SymWoodbury(1.0 * I(p), X', I(n) / n)
WoodburyRidgePredictor(X, wdb)
end
#It would be very useful if there could be an implementation of
#```julia
#ldiv!(dest::AbstractMatrix, W::AbstractWoodbury, B::AbstractMatrix)
#```
#Right now I think this only works with `AbstractVector`. Before implementing and filing a pull request, I was wondering whether you think it is an OK approach to
#```julia
# for i=1:ncols
# ldiv!(view(dest, :, i), A, view(B,:,i))
#end
#```
#------------------------------------------------------------------------
# TODO: Fix the following two things upstream on WoodburyMatrices.jl
#------------------------------------------------------------------------
function _ldiv!(dest, W::SymWoodbury, A::Diagonal, B)
WoodburyMatrices.myldiv!(W.tmpN1, A, B)
mul!(W.tmpk1, W.V, W.tmpN1)
mul!(W.tmpk2, W.Cp, W.tmpk1)
mul!(W.tmpN2, W.U, W.tmpk2)
WoodburyMatrices.myldiv!(A, W.tmpN2)
for i = 1:length(W.tmpN2)
@inbounds dest[i] = W.tmpN1[i] - W.tmpN2[i]
end
return dest
end
#-----------------------------------------------------------------------
function LinearAlgebra.ldiv!(Y::AbstractMatrix, A::SymWoodbury, B::AbstractMatrix)
ncols = size(B, 2)
for i = 1:ncols
ldiv!(view(Y, :, i), A, view(B, :, i))
end
Y
end
#-----------------------------------------------------------------------
function update_λs!(wbpred::WoodburyRidgePredictor, groups, λs)
wdb = wbpred.wdb
n = size(wdb.D, 1)
A = Diagonal(group_expand(groups, λs))
wdb.A .= A
wdb.Dp .= inv(n * I + wdb.B' * (A \ wdb.B))
end
function ldiv!(A, wbpred::WoodburyRidgePredictor, B)
ldiv!(A, wbpred.wdb, B)
end
function \(wbpred::WoodburyRidgePredictor, B)
wbpred.wdb \ B
end
function XtXpΛ_ldiv_XtX(wbpred::WoodburyRidgePredictor)
n = size(wbpred.X, 1)
(wbpred.wdb \ wbpred.X') * wbpred.X ./ n
end
function trace_XtX(wbpred::WoodburyRidgePredictor)
n = size(wbpred.X, 1)
# recall XtX here really is XtX/n
tr(wbpred.X' * wbpred.X) / n #make more efficient later.
end
Base.@kwdef mutable struct BasicGroupRidgeWorkspace{
CP<:AbstractRidgePredictor,
M<:AbstractMatrix,
V<:AbstractVector,
}
X::M
Y::V
groups::GroupedFeatures
n::Integer = size(X, 1)
p::Integer = size(X, 2)
λs::V = ones(groups.num_groups)
XtY::V = X' * Y ./ n
XtXpΛ_chol::CP = CholeskyRidgePredictor(X)
XtXpΛ_div_Xt::M = (XtXpΛ_chol \ X') ./ n
β_curr::V = XtXpΛ_chol \ XtY
leverage_store::V = zeros(n)
Y_hat::V = X * β_curr
cache = nothing
end
ngroups(rdg::BasicGroupRidgeWorkspace) = ngroups(rdg.groups)
# StatsBase.jl interace
coef(rdg::BasicGroupRidgeWorkspace) = rdg.β_curr
islinear(rdg::BasicGroupRidgeWorkspace) = true
leverage(rdg::BasicGroupRidgeWorkspace) = rdg.leverage_store
modelmatrix(rdg::BasicGroupRidgeWorkspace) = rdg.X
predict(rdg::BasicGroupRidgeWorkspace, X) = X * coef(rdg)
response(rdg::BasicGroupRidgeWorkspace) = rdg.Y
function loo_error(rdg::BasicGroupRidgeWorkspace)
mean(abs2.((rdg.Y .- rdg.Y_hat) ./ (1.0 .- rdg.leverage_store)))
end
function mse_ridge(rdg::BasicGroupRidgeWorkspace, X_test, Y_test)
mean(abs2.(Y_test - X_test * rdg.β_curr))
end
function StatsBase.fit!(rdg::BasicGroupRidgeWorkspace, λs)
λs = isa(λs, MutableNamedTuple) ? collect(values(λs)) : λs
rdg.λs .= λs
update_λs!(rdg.XtXpΛ_chol, rdg.groups, λs)
#rdg.XtXpΛ .= Symmetric(rdg.XtX + Diagonal(group_expand(rdg.groups, λs)))
#cholesky!(rdg.XtXpΛ)
ldiv!(rdg.β_curr, rdg.XtXpΛ_chol, rdg.XtY)
mul!(rdg.Y_hat, rdg.X, rdg.β_curr)
ldiv!(rdg.XtXpΛ_div_Xt, rdg.XtXpΛ_chol, rdg.X')
rdg.XtXpΛ_div_Xt ./= rdg.n
_prod_diagonals!(rdg.leverage_store, rdg.X, rdg.XtXpΛ_div_Xt)
loo_error(rdg)
end
"""
λωλας_λ(rdg; multiplier=0.1)
Implements the Panagiotis Lolas rule of thumb for picking an optimal λ.
"""
function λωλας_λ(rdg; multiplier = 0.1)
multiplier * rdg.p^2 / rdg.n / trace_XtX(rdg.XtXpΛ_chol) #TODO 2s
end
#function max_σ_squared(rdg)
# mean(abs2, rdg.Y)
#end
# Tuning through Moment Fitting
Base.@kwdef struct MomentTunerSetup{
IV<:AbstractVector,
FV<:AbstractVector,
FM<:AbstractMatrix,
}
ps::IV
n::Integer
beta_norms_squared::FV
N_norms_squared::FV
M_squared::FM
end
function MomentTunerSetup(rdg::BasicGroupRidgeWorkspace)
grps = rdg.groups
n = rdg.n
ps = grps.ps
ngroups = grps.num_groups
beta_norms_squared = group_summary(grps, rdg.β_curr, x -> sum(abs2, x))
N_matrix = rdg.XtXpΛ_div_Xt #sqrt(n)*N from paper
M_matrix = XtXpΛ_ldiv_XtX(rdg.XtXpΛ_chol) #TODO 1
N_norms_squared = Vector{eltype(beta_norms_squared)}(undef, ngroups)
M_squared = Matrix{eltype(beta_norms_squared)}(undef, ngroups, ngroups)
for g = 1:ngroups
N_norms_squared[g] = sum(abs2, N_matrix[group_idx(grps, g), :])
for h = 1:ngroups
# entry (h, g) holds the squared Frobenius norm of the (g, h) block of M
M_squared[h, g] = sum(abs2, M_matrix[group_idx(grps, g), group_idx(grps, h)])
end
end
MomentTunerSetup(
ps = ps,
n = n,
beta_norms_squared = beta_norms_squared,
N_norms_squared = N_norms_squared,
M_squared = M_squared,
)
end
function σ_squared_max(mom::MomentTunerSetup)
u = mom.beta_norms_squared
v = mom.N_norms_squared
maximum(u ./ v)
end
function get_αs_squared(mom::MomentTunerSetup, σ_squared)
rhs = mom.beta_norms_squared .- σ_squared .* mom.N_norms_squared
α_sq_by_p = vec(nonneg_lsq(mom.M_squared, rhs; alg = :fnnls)) # nonnegative solve; cf. mom.M_squared \ rhs
α_sq_by_p .* mom.ps
end
function get_λs(mom::MomentTunerSetup, σ_squared)
αs_squared = get_αs_squared(mom, σ_squared)
γs = mom.ps ./ mom.n
σ_squared .* γs ./ αs_squared
end
function sigma_squared_path(
rdg::BasicGroupRidgeWorkspace,
mom::MomentTunerSetup,
σs_squared,
)
n_σs = length(σs_squared)
n_groups = ngroups(rdg)
loos_hat = zeros(n_σs)
λs = zeros(n_σs, n_groups)
βs = zeros(n_σs, rdg.groups.p)
for (i, σ_squared) in enumerate(σs_squared)
λs_tmp = get_λs(mom, σ_squared)
λs[i, :] = λs_tmp
loos_hat[i] = fit!(rdg, λs_tmp)
βs[i, :] = rdg.β_curr
end
(λs = λs, loos = loos_hat, βs = βs)
end
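# Illustrative end-to-end sketch (assumes a design matrix `X` and response `Y`
# whose columns match `grp`; these names are not defined in this file):
#   grp = GroupedFeatures([50, 50])
#   ws  = BasicGroupRidgeWorkspace(X = X, Y = Y, groups = grp)
#   fit!(ws, λωλας_λ(ws))          # initial fit at the rule-of-thumb λ
#   mom = MomentTunerSetup(ws)
#   path = sigma_squared_path(ws, mom, range(0.1, σ_squared_max(mom); length = 20))
#   path.loos                      # LOO error along the σ² path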
| SigmaRidgeRegression | https://github.com/nignatiadis/SigmaRidgeRegression.jl.git |
["MIT"] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | code | 3236 | abstract type CovarianceDesign{T} end
get_Σ(mat) = mat
nfeatures(mat) = size(mat, 1)
function spectrum(mat)
eigs = eigvals(mat)
probs = fill(1/length(eigs), length(eigs))
DiscreteNonParametric(eigs, probs)
end
spectrum(cov::CovarianceDesign) = spectrum(get_Σ(cov))
nfeatures(cov::CovarianceDesign) = cov.p
function simulate_rotated_design(cov, n; rotated_measure = Normal())
Σ = get_Σ(cov)
Σ_chol = cholesky(Σ)
p = nfeatures(cov)
Z = rand(rotated_measure, n, p)
X = Z * Σ_chol.UL # rotate the iid draws so that rows have covariance Σ
X
end
Base.@kwdef struct AR1Design{P<:Union{Missing,Int}} <: CovarianceDesign{P}
p::P = missing
ρ = 0.7
end
function get_Σ(cov::AR1Design{Int})
p = nfeatures(cov)
ρ = cov.ρ
Σ = [ρ^(abs(i - j)) for i = 1:p, j = 1:p]
Σ
end
abstract type DiagonalCovarianceDesign{T} <: CovarianceDesign{T} end
Base.@kwdef struct IdentityCovarianceDesign{P<:Union{Missing,Int}} <:
DiagonalCovarianceDesign{P}
p::P = missing
end
spectrum(::IdentityCovarianceDesign) = DiscreteNonParametric([1.0],[1.0])
function get_Σ(cov::IdentityCovarianceDesign{Int})
I(cov.p)
end
Base.@kwdef struct UniformScalingCovarianceDesign{P<:Union{Missing,Int}} <:
DiagonalCovarianceDesign{P}
scaling::Float64 = 1.0
p::P = missing
end
spectrum(unif::UniformScalingCovarianceDesign) = DiscreteNonParametric([unif.scaling],[1.0])
function get_Σ(cov::UniformScalingCovarianceDesign{Int})
(cov.scaling * I)(cov.p)
end
Base.@kwdef struct ExponentialOrderStatsCovarianceDesign{P<:Union{Missing,Int}} <:
DiagonalCovarianceDesign{P}
p::P = missing
rate::Float64
end
function spectrum(cov::ExponentialOrderStatsCovarianceDesign)
p = cov.p
rate = cov.rate
tmp = range(1 / (2p); stop = 1 - 1 / (2p), length = p)
eigs = 1 / rate .* log.(1 ./ tmp)
DiscreteNonParametric(eigs, fill(1/p, p))
end
get_Σ(cov::ExponentialOrderStatsCovarianceDesign) = Diagonal(support(spectrum(cov)))
struct BlockCovarianceDesign{T, S <: CovarianceDesign{T}, G} <: CovarianceDesign{T}
blocks::Vector{S}
groups::G
end
function BlockCovarianceDesign(blocks::Vector{S}) where S<:CovarianceDesign{Missing}
BlockCovarianceDesign(blocks, missing)
end
nfeatures(cov::BlockCovarianceDesign) = sum(nfeatures.(cov.blocks))
function get_Σ(blockdesign::BlockCovarianceDesign)
BlockDiagonal(get_Σ.(blockdesign.blocks))
end
function spectrum(blockdesign::BlockCovarianceDesign)
@unpack blocks, groups = blockdesign
spectra = spectrum.(blocks)
mixing_prop = groups.ps ./ groups.p
MixtureModel(spectra, mixing_prop)
end
function simulate_rotated_design(cov::BlockCovarianceDesign, n; rotated_measure = Normal())
hcat(simulate_rotated_design.(cov.blocks, n; rotated_measure=rotated_measure)...)
end
# Set groups
function set_groups(design::CovarianceDesign, p::Integer)
@set design.p = p
end
function set_groups(design::CovarianceDesign, groups::GroupedFeatures)
set_groups(design, nfeatures(groups))
end
function set_groups(blockdesign::BlockCovarianceDesign, groups::GroupedFeatures)
updated_blocks = set_groups.(blockdesign.blocks, groups.ps)
BlockCovarianceDesign(updated_blocks, groups)
end
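# Illustrative sketch: assemble a two-block design and attach group sizes.
#   grp    = GroupedFeatures([100, 100])
#   design = BlockCovarianceDesign([AR1Design(ρ = 0.5), IdentityCovarianceDesign()])
#   design = set_groups(design, grp)    # fills in each block's dimension
#   Σ      = get_Σ(design)              # 200×200 BlockDiagonal
#   X      = simulate_rotated_design(design, 50)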
| SigmaRidgeRegression | https://github.com/nignatiadis/SigmaRidgeRegression.jl.git |
["MIT"] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | code | 847 | abstract type AbstractGroupRegressor <: MMI.Deterministic end
abstract type AbstractGroupRidgeRegressor <: AbstractGroupRegressor end
function StatsBase.fit(grp_ridge::AbstractGroupRidgeRegressor, X, Y, grp::GroupedFeatures)
decomposition = grp_ridge.decomposition
tuning = _main_hyperparameter_value(grp_ridge)
nobs = length(Y)
if decomposition === :default
decomposition = (nfeatures(grp) <= 4*nobs) ? :cholesky : :woodbury
end
if decomposition === :cholesky
pred = CholeskyRidgePredictor(X)
elseif decomposition === :woodbury
pred = WoodburyRidgePredictor(X)
else
"Only :default, :cholesky and :woodbury currently supported"
end
workspace = BasicGroupRidgeWorkspace(X = X, Y = Y, groups = grp, XtXpΛ_chol = pred)
StatsBase.fit!(workspace, tuning)
workspace
end
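# Minimal usage sketch (X::Matrix and Y::Vector are assumed, not defined here):
#   grp = GroupedFeatures([10, 10])
#   ws  = StatsBase.fit(MultiGroupRidgeRegressor(groups = grp), X, Y, grp)
#   StatsBase.coef(ws)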
| SigmaRidgeRegression | https://github.com/nignatiadis/SigmaRidgeRegression.jl.git |
["MIT"] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | code | 1317 |
"""
GroupedFeatures(ps::AbstractVector{Int})
A type representing groups of features, wherein the first `ps[1]` features are one group,
the next `ps[2]` features are the second group and so forth.
"""
struct GroupedFeatures{IV<:AbstractVector{Int}}
ps::IV
p::Int
num_groups::Int
end
function GroupedFeatures(; group_size::Int, num_groups::Int)
GroupedFeatures(fill(group_size, num_groups))
end
GroupedFeatures(ps) = GroupedFeatures(ps, sum(ps), length(ps))
ngroups(gr::GroupedFeatures) = gr.num_groups
nfeatures(gr::GroupedFeatures) = gr.p
function group_idx(gr::GroupedFeatures, i::Integer)
starts = cumsum([1; gr.ps])[1:end-1]
ends = cumsum(gr.ps)
starts[i]:ends[i]
end
function group_summary(gr::GroupedFeatures, vec::AbstractVector, f)
ps = gr.ps
num_groups = gr.num_groups
starts = cumsum([1; ps])[1:end-1]
ends = cumsum(ps)
el = eltype(f(vec))
output = Vector{el}(undef, num_groups)
for g = 1:num_groups
output[g] = f(vec[starts[g]:ends[g]])
end
output
end
function group_expand(gr::GroupedFeatures, vec::AbstractVector)
arr = zeros(eltype(vec), gr.p)
for i = 1:gr.num_groups
arr[group_idx(gr, i)] .= vec[i]
end
arr
end
function group_expand(gr::GroupedFeatures, el::Number)
fill(el, gr.p)
end
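# Example: three groups of sizes 2, 3 and 1 (six features in total).
#   grp = GroupedFeatures([2, 3, 1])
#   group_idx(grp, 2)                    # 3:5
#   group_expand(grp, [0.1, 0.2, 0.3])   # [0.1, 0.1, 0.2, 0.2, 0.2, 0.3]
#   group_summary(grp, ones(6), sum)     # [2.0, 3.0, 1.0]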
| SigmaRidgeRegression | https://github.com/nignatiadis/SigmaRidgeRegression.jl.git |
["MIT"] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | code | 3425 | Base.@kwdef mutable struct GroupLassoRegressor{G,P,T<:Number} <: AbstractGroupRegressor
decomposition::Symbol = :default
groups::G
groups_multiplier::P = sqrt.(groups.ps) ./ sqrt(groups.p)
λ::T = 1.0
center::Bool = true
scale::Bool = true
maxiter::Int = 100
η_reg::T = 1e-5
η_threshold::T = 1e-2
abs_tol::T = 1e-4
truncate_to_zero::Bool = true
end
_main_hyperparameter(::GroupLassoRegressor) = :λ
function _default_hyperparameter_maximum(model::GroupLassoRegressor, fitted_machine)
@unpack groups, groups_multiplier = model
_norms = group_summary(groups, _workspace(fitted_machine.cache).XtY, norm)
maximum(_norms ./ groups_multiplier)
end
_default_param_min_ratio(::GroupLassoRegressor, fitted_machine) = 1e-5
function _glasso_fit!(workspace, glasso::GroupLassoRegressor)
@unpack η_reg, η_threshold, abs_tol, groups, maxiter, λ, groups_multiplier, truncate_to_zero = glasso
tmp_λs = copy(workspace.λs)
ηs_new = group_summary(groups, StatsBase.coef(workspace), norm)
ηs_old = copy(ηs_new)
converged = false
iter_cnt = 0
for i = 1:maxiter
tmp_λs .= λ .* groups_multiplier ./ sqrt.(abs2.(ηs_new) .+ η_reg)
fit!(workspace, tmp_λs)
ηs_new .= group_summary(groups, StatsBase.coef(workspace), norm)
#converged = norm(ηs_new .- ηs_old, Inf) < abs_tol
#@show (ηs_new .- ηs_old) ./ sqrt.(abs2.(ηs_old) .+ η_reg)
#@show (ηs_new .- ηs_old)
#@show sqrt.( abs2.(ηs_old) .+ η_reg)
converged =
(norm((ηs_new .- ηs_old) ./ sqrt.(abs2.(ηs_old) .+ η_reg), Inf) < abs_tol) ||
(norm(ηs_new .- ηs_old, Inf) < abs_tol)
ηs_old .= ηs_new
iter_cnt += 1
converged && break
end
#@show "conv"
ηs = group_summary(groups, StatsBase.coef(workspace), norm)
final_λs = deepcopy(workspace.λs)
#zero_groups = group_summary(groups, StatsBase.coef(workspace), norm) .< η_threshold .* groups_multiplier
#final_λs[zero_groups] .= Inf
#fit!(workspace, final_λs)
(workspace = workspace, converged = converged, iter_count = iter_cnt)
end
function MMI.fit(m::GroupLassoRegressor, verb::Int, X, y)
@unpack decomposition, center, scale = m
Xmatrix = MMI.matrix(X)
p = size(Xmatrix, 2)
m_tmp = MultiGroupRidgeRegressor(;
groups = m.groups,
decomposition = decomposition,
scale = scale,
center = center,
)
multiridge_machine = MLJ.machine(m_tmp, X, y)
fit!(multiridge_machine)
workspace = _workspace(multiridge_machine.cache)
glasso_workspace = _glasso_fit!(workspace, m)
βs = StatsBase.coef(glasso_workspace.workspace)
x_transform = multiridge_machine.fitresult.x_transform
y_transform = multiridge_machine.fitresult.y_transform
fitresult = (coef = βs, x_transform = x_transform, y_transform = y_transform)
# return
return fitresult, glasso_workspace, NamedTuple{}()
end
function MMI.update(
model::GroupLassoRegressor,
verbosity::Int,
old_fitresult,
old_cache,
X,
y,
)
glasso_workspace = _glasso_fit!(old_cache.workspace, model)
βs = StatsBase.coef(glasso_workspace.workspace)
x_transform = old_fitresult.x_transform
y_transform = old_fitresult.y_transform
fitresult = (coef = βs, x_transform = x_transform, y_transform = y_transform)
return fitresult, glasso_workspace, NamedTuple{}()
end
| SigmaRidgeRegression | https://github.com/nignatiadis/SigmaRidgeRegression.jl.git |
["MIT"] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | code | 10825 | abstract type FixedLambdaGroupRidgeRegressor <: AbstractGroupRidgeRegressor end
"""
SingleGroupRidgeRegressor(; λ,
decomposition = :default,
center = true,
scale = true)
Type representing vanilla Ridge regression with hyperparameter `λ`.
`center` and `scale` (default `true` for both) control whether the response and
features should be centered and scaled first (make sure that `center=true` if the
model is supposed to have an intercept!). `decomposition` can be one of `:default`,
`:cholesky` or `:woodbury` and determines how the linear system is solved.
"""
Base.@kwdef mutable struct SingleGroupRidgeRegressor{T,G} <: FixedLambdaGroupRidgeRegressor
decomposition::Symbol = :default
λ::T = 1.0
groups::G = nothing
center::Bool = true
scale::Bool = true
end
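# Usage sketch with MLJ (a table `X` and vector `y` are assumed, not defined here):
#   ridge = SingleGroupRidgeRegressor(λ = 0.5)
#   mach  = MLJ.machine(ridge, X, y)
#   MLJ.fit!(mach)
#   MLJ.predict(mach, X)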
_main_hyperparameter(::SingleGroupRidgeRegressor) = :λ
_main_hyperparameter_value(m) = getproperty(m, _main_hyperparameter(m))
function _default_hyperparameter_maximum(
model::FixedLambdaGroupRidgeRegressor,
fitted_machine,
)
1000 * maximum(abs.(fitted_machine.cache.XtY))
end
function _default_param_min_ratio(ridge, fitted_machine)
1e-6
end
function _default_scale(ridge, fitted_machine)
:log10
end
function _groups(m::SingleGroupRidgeRegressor, p)
isnothing(m.groups) ? GroupedFeatures([p]) : m.groups
end
function MMI.fit(m::FixedLambdaGroupRidgeRegressor, verb::Int, X, y)
@unpack center, scale, groups = m
Xmatrix = MMI.matrix(X)
if center || scale
x_transform = StatsBase.fit(
ZScoreTransform,
Xmatrix;
dims = 1,
center = center,
scale = scale,
)
y_transform =
StatsBase.fit(ZScoreTransform, y; dims = 1, center = center, scale = scale)
Xmatrix = StatsBase.transform(x_transform, Xmatrix)
y = StatsBase.transform(y_transform, y)
else
x_transform = nothing
y_transform = nothing
end
p = size(Xmatrix, 2)
groups = _groups(m, p)
workspace = StatsBase.fit(m, Xmatrix, y, groups) # see end_to_end.jl
βs = StatsBase.coef(workspace)
fitresult = (coef = βs, x_transform = x_transform, y_transform = y_transform)
# return
return fitresult, workspace, NamedTuple{}()
end
function MMI.update(
model::AbstractGroupRidgeRegressor,
verbosity::Int,
old_fitresult,
old_cache,
X,
y,
)
new_λ = _main_hyperparameter_value(model)
StatsBase.fit!(old_cache, new_λ)
βs = StatsBase.coef(old_cache)
fitresult = (
coef = βs,
x_transform = old_fitresult.x_transform,
y_transform = old_fitresult.y_transform,
)
return fitresult, old_cache, NamedTuple{}()
end
function MMI.predict(model::AbstractGroupRegressor, fitresult, Xnew)
Xnew = MMI.matrix(Xnew)
@unpack coef, x_transform, y_transform = fitresult
!isnothing(x_transform) && (Xnew = StatsBase.transform(x_transform, Xnew))
ypred = Xnew * coef
!isnothing(y_transform) && StatsBase.reconstruct!(y_transform, ypred)
ypred
end
function range_and_grid(
ridge::AbstractGroupRegressor,
param_min,
param_max,
scale,
resolution,
n,
rng
)
param_symbol = _main_hyperparameter(ridge)
param_range =
range(ridge, param_symbol, lower = param_min, upper = param_max, scale = scale)
model_grid =
MLJTuning.grid(ridge, [param_symbol], [MLJ.iterator(param_range, resolution)])
if length(model_grid) > n
model_grid = sample(rng, model_grid, n; replace = false)
end
param_range, model_grid
end
"""
MultiGroupRidgeRegressor(; decomposition, λ, groups)
"""
mutable struct MultiGroupRidgeRegressor{T,G<:GroupedFeatures} <:
FixedLambdaGroupRidgeRegressor
decomposition::Symbol
λs::T #Named tuple
groups::G
center::Bool
scale::Bool
end
_main_hyperparameter(::MultiGroupRidgeRegressor) = :λs
_groups(m::MultiGroupRidgeRegressor, p) = m.groups
function MultiGroupRidgeRegressor(;
groups::GroupedFeatures,
λs::AbstractVector = ones(ngroups(groups)),
decomposition = :default,
center = true,
scale = true,
)
ngr = ngroups(groups)
λ_expr = Tuple(Symbol.(:λ, Base.OneTo(ngr)))
λ_tupl = MutableNamedTuple{λ_expr}(tuple(λs...))
MultiGroupRidgeRegressor(decomposition, λ_tupl, groups, center, scale)
end
function range_and_grid(ridge::MultiGroupRidgeRegressor, λ_min, λ_max, scale, resolution, n, rng)
λ_names = [Meta.parse("(λs.$λ)") for λ in keys(ridge.λs)]
nparams = length(λ_names)
λ_range =
[range(ridge, λ, lower = λ_min, upper = λ_max, scale = scale) for λ in λ_names]
λ_product_grid = MLJ.iterator.(λ_range, resolution)
if nparams * log(resolution) > log(n) # i.e. resolution^nparams > n: randomly subsample the grid
tmp_idx = zeros(Int, nparams)
model_grid = [deepcopy(ridge) for i in Base.OneTo(n)]
for i in Base.OneTo(n)
sample!(rng, 1:resolution, tmp_idx)
clone = model_grid[i]
for k in eachindex(λ_names)
MLJ.recursive_setproperty!(clone, λ_names[k], λ_product_grid[k][tmp_idx[k]])
end
end
else
model_grid = MLJTuning.grid(ridge, λ_names, λ_product_grid)
end
λ_range, model_grid
end
"""
DefaultTuning(resolution, n, param_min_ratio, param_max, scale)
Determines the default set of hyperparameters to loop over when tuning a
`AbstractGroupRidgeRegressor` method. Parameters are chosen on a grid
that is equidistant in `scale` (e.g. `:log10` or `:linear` or `:default`) with number
of points given by `resolution` (default `100`) that ranges from `param_min_ratio*param_max` to
`param_max`. Both `param_min_ratio` and `param_max` can be specified as `:default`,
in which case a method specific default choice will be used.
If there are multiple hyperparameters (say `d`),
then the above rules are used componentwise. `n` (default `1000`)
is the largest number of hyperparameters to explore (if `resolution^d > n`,
then the parameters are randomly subsampled to `n` of them).
"""
Base.@kwdef struct DefaultTuning{T,M}
resolution::Int = 100
n::Int = 1000
param_min_ratio::M = :default
param_max::T = :default
scale = :default
end
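# Example: a coarser, linearly spaced grid with an explicit lower ratio.
#   DefaultTuning(resolution = 30, param_min_ratio = 1e-3, scale = :linear)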
function _tuning_grid(tuning::DefaultTuning, model, fitted_machine, rng)
@unpack resolution, n, scale = tuning
if tuning.param_max === :default
param_max = _default_hyperparameter_maximum(model, fitted_machine)
elseif isa(tuning.param_max, Number)
param_max = tuning.param_max
else
error("param_max can be :default or a number only.")
end
if tuning.param_min_ratio === :default
param_min_ratio = _default_param_min_ratio(model, fitted_machine)
elseif isa(tuning.param_min_ratio, Number)
param_min_ratio = tuning.param_min_ratio
else
error("param_min_ratio can be :default or a number only.")
end
if tuning.scale === :default
_scale = _default_scale(model, fitted_machine)
else
_scale = tuning.scale
end
param_min = param_min_ratio * param_max
param_range, model_grid = range_and_grid(model, param_min, param_max, _scale, resolution, n, rng)
param_range, model_grid, param_max
end
"""
LooRidgeRegressor(;ridge,
tuning = DefaultTuning(),
rng = Random.GLOBAL_RNG)
A MLJ model that wraps a `ridge` model such as `SigmaRidgeRegressor` and tunes
its parameters by leave-one-out-cross-validation with `tuning` settings defaulting to
[`DefaultTuning`](@ref). In case there is randomness in choosing the search space of
hyperparameters, then the `rng` may be specified (defaults to `Random.GLOBAL_RNG`).
"""
Base.@kwdef mutable struct LooRidgeRegressor{G,T} <: AbstractGroupRidgeRegressor
ridge::G = SingleGroupRidgeRegressor()
tuning::T = DefaultTuning()
rng = Random.GLOBAL_RNG
end
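# Usage sketch: tune a single ridge penalty by exact leave-one-out CV
# (`X` and `y` are assumed, not defined here):
#   loo_ridge = LooRidgeRegressor(ridge = SingleGroupRidgeRegressor())
#   mach = MLJ.machine(loo_ridge, X, y)
#   MLJ.fit!(mach)
#   MLJ.report(mach).best_param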
_groups(loo::LooRidgeRegressor) = _groups(loo.ridge)
_workspace(wk::BasicGroupRidgeWorkspace) = wk
_workspace(wk) = wk.workspace
function MMI.fit(m::LooRidgeRegressor, verb::Int, X, y)
ridge = m.ridge
mach = MLJ.machine(ridge, X, y)
fit!(mach)
x_transform = mach.fitresult.x_transform
y_transform = mach.fitresult.y_transform
ridge_workspace = _workspace(mach.cache)
param_range, model_grid, param_max = _tuning_grid(m.tuning, ridge, mach, m.rng)
history = map(model_grid) do newm
param = _main_hyperparameter_value(newm)
mach.model = newm
fit!(mach; verbosity = 0)
λ = deepcopy(ridge_workspace.λs)
loo = loo_error(ridge_workspace)
(loo = loo, param = param, model = newm, λ = λ)
end
loos = [h.loo for h in history]
params = [h.param for h in history]
λs = [h.λ for h in history]
best_model_idx = argmin(loos)
best_model = model_grid[best_model_idx]
best_param = params[best_model_idx]
best_loo = loos[best_model_idx]
best_λs = λs[best_model_idx]
report = (
best_model = best_model,
best_param = best_param,
best_λs = best_λs,
best_loo = best_loo,
loos = loos,
λs = λs,
params = params,
param_max = param_max,
param_range = param_range,
)
mach.model = best_model
fit!(mach)
βs = StatsBase.coef(ridge_workspace)
fitresult = (coef = βs, x_transform = x_transform, y_transform = y_transform)
# return
return fitresult, ridge_workspace, report
end
Base.@kwdef mutable struct TunedRidgeRegressor{G,R,M,T} <: AbstractGroupRidgeRegressor
ridge::G = SingleGroupRidgeRegressor(decomposition = :cholesky, λ = 1.0)
tuning::T = DefaultTuning()
resampling::R = MLJ.CV(nfolds = 5, shuffle=true)
measure::M = MLJ.l2
end
function MMI.fit(m::TunedRidgeRegressor, verb::Int, X, y)
ridge = m.ridge
mach = MLJ.machine(ridge, X, y)
fit!(mach)
#x_transform = mach.fitresult.x_transform
#y_transform = mach.fitresult.y_transform
ridge_workspace = _workspace(mach.cache)
param_range, model_grid, param_max = _tuning_grid(m.tuning, ridge, mach, m.resampling.rng)
tuned_model = MLJ.TunedModel(
model = ridge,
ranges = model_grid,
tuning = MLJ.Explicit(),
resampling = m.resampling,
measure = m.measure,
)
tuned_mach = MLJ.machine(tuned_model, X, y)
fit!(tuned_mach)
_fitresult = tuned_mach.fitresult.fitresult
_cache = tuned_mach.fitresult.cache
best_λs = deepcopy(_workspace(_cache).λs)
tunedreport = tuned_mach.report
best_param = _main_hyperparameter_value(tunedreport.best_model)
tunedreport = (tunedreport..., best_param=best_param, best_λs = best_λs)
# return
return _fitresult, _cache, tunedreport
end
| SigmaRidgeRegression | https://github.com/nignatiadis/SigmaRidgeRegression.jl.git |
["MIT"] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | code | 4060 | """
SigmaRidgeRegressor(; decomposition, groups, σ, center, scale, init_model)
A MLJ model that fits σ-Ridge Regression with `groups` and parameter `σ`.
`center` and `scale` (default `true` for both) control whether the response and
features should be centered and scaled first (make sure that `center=true` if the
model is supposed to have an intercept!). `decomposition` can be one of `:default`,
`:cholesky` or `:woodbury` and determines how the linear system is solved. `init_model`
is the initial model used to define the Method of Moments map from `σ` to `λ`; it defaults
to leave-one-out ridge without groups.
"""
Base.@kwdef mutable struct SigmaRidgeRegressor{G,T,M} <:
SigmaRidgeRegression.AbstractGroupRidgeRegressor
decomposition::Symbol = :default
groups::G
σ::T = 1.0
center::Bool = true
scale::Bool = true
init_model::M = LooRidgeRegressor(;ridge=SingleGroupRidgeRegressor(
decomposition = decomposition,
groups = groups,
center = center,
scale = scale,)
)
end
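# Usage sketch at a fixed σ (`X`, `y` and a matching grouping are assumed):
#   grp  = GroupedFeatures([10, 10])
#   sr   = SigmaRidgeRegressor(groups = grp, σ = 0.5)
#   mach = MLJ.machine(sr, X, y)
#   MLJ.fit!(mach)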
_main_hyperparameter(::SigmaRidgeRegressor) = :σ
function _default_hyperparameter_maximum(model::SigmaRidgeRegressor, fitted_machine)
sqrt(σ_squared_max(fitted_machine.cache.mom))
end
_default_param_min_ratio(::SigmaRidgeRegressor, fitted_machine) = 1e-3
_default_scale(::SigmaRidgeRegressor, fitted_machine) = :linear
function MMI.fit(m::SigmaRidgeRegressor, verb::Int, X, y)
@unpack init_model, decomposition, center, scale, groups = m
init_machine = MLJ.machine(init_model, X, y)
fit!(init_machine; verbosity = verb)
mom = MomentTunerSetup(init_machine.cache)
σ = m.σ
λs = SigmaRidgeRegression.get_λs(mom, abs2(σ))
multiridge = MultiGroupRidgeRegressor(;
groups=groups,
λs=λs,
decomposition = decomposition,
center = center,
scale = scale,
)
multiridge_machine = MLJ.machine(multiridge, X, y)
fit!(multiridge_machine; verbosity = verb)
workspace = multiridge_machine.cache
cache = (workspace = workspace, mom = mom, multiridge_machine = multiridge_machine)
βs = StatsBase.coef(workspace)
x_transform = multiridge_machine.fitresult.x_transform
y_transform = multiridge_machine.fitresult.y_transform
fitresult = (coef = βs, x_transform = x_transform, y_transform = y_transform)
# return
return fitresult, cache, NamedTuple{}()
end
function MMI.update(
model::SigmaRidgeRegressor,
verbosity::Int,
old_fitresult,
old_cache,
X,
y,
)
@unpack init_model, decomposition, center, scale, groups = model
workspace = old_cache.workspace
multiridge_machine = old_cache.multiridge_machine
mom = old_cache.mom
σ = model.σ
λs = SigmaRidgeRegression.get_λs(mom, abs2(σ))
multiridge = MultiGroupRidgeRegressor(;
groups=groups,
λs=λs,
decomposition = decomposition,
center = center,
scale = scale,
)
multiridge_machine.model = multiridge
fit!(multiridge_machine; verbosity = verbosity)
cache = (workspace = workspace, mom = mom, multiridge_machine = multiridge_machine)
βs = StatsBase.coef(workspace)
x_transform = multiridge_machine.fitresult.x_transform
y_transform = multiridge_machine.fitresult.y_transform
fitresult = (coef = βs, x_transform = x_transform, y_transform = y_transform)
return fitresult, cache, NamedTuple{}()
end
"""
LooSigmaRidgeRegressor(; kwargs...)
Convenience constructors and type-alias for `LooRidgeRegressor{<:SigmaRidgeRegressor}`.
Equivalent to `LooRidgeRegressor(;ridge= SigmaRidgeRegressor())` with `kwargs`
passed to both `LooRidgeRegressor` and `SigmaRidgeRegressor`.
"""
const LooSigmaRidgeRegressor = LooRidgeRegressor{<:SigmaRidgeRegressor}
function LooSigmaRidgeRegressor(;
tuning = DefaultTuning(),
rng = Random.GLOBAL_RNG, kwargs...)
sigma_ridge = SigmaRidgeRegressor(;kwargs...)
LooRidgeRegressor(;ridge=deepcopy(sigma_ridge), tuning=tuning, rng=rng)
end
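# Usage sketch: σ is tuned by leave-one-out on its default linear grid
# (`X` and `y` are assumed, not defined here):
#   loo_sr = LooSigmaRidgeRegressor(groups = GroupedFeatures([10, 10]))
#   mach   = MLJ.machine(loo_sr, X, y)
#   MLJ.fit!(mach)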
| SigmaRidgeRegression | https://github.com/nignatiadis/SigmaRidgeRegression.jl.git |
["MIT"] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | code | 3670 | # Code below is hard-copied from
# https://github.com/ahwillia/NonNegLeastSquares.jl
# See that repository for the license of this file.
# (Temporary stop-gap until the above package is registered)
using Distributed
function nonneg_lsq(A, B; alg::Symbol = :pivot, gram::Bool = false, kwargs...)
if alg == :fnnls
return fnnls(A, B; gram = gram, kwargs...)
else
error("Specified algorithm :", alg, " not recognized.")
end
end
# If second input is a vector, convert it to a matrix
nonneg_lsq(A, b::AbstractVector; kwargs...) = nonneg_lsq(A, b[:, :]; kwargs...)
"""
x = fnnls(AtA, Atb; ...)
Returns x that solves A*x = b in the least-squares sense, subject to x >=0. The
inputs are the cross-products AtA = A'*A and Atb = A'*b. Uses the modified
active set method of Bro and De Jong (1997).
Optional arguments:
tol: tolerance for nonnegativity constraints
max_iter: maximum number of iterations (counts inner loop iterations)
References:
Bro R, De Jong S. A fast non-negativity-constrained least squares
algorithm. Journal of Chemometrics. 11, 393–401 (1997)
"""
function fnnls(
AtA,
Atb::AbstractVector{T};
tol::Float64 = 1e-8,
max_iter = 30 * size(AtA, 2),
) where {T}
n = size(AtA, 1)
x = zeros(T, n)
s = zeros(T, n)
# P is a bool array storing positive elements of x
# i.e., x[P] > 0 and x[~P] == 0
P = x .> tol
w = Atb - AtA * x
# We have reached an optimum when either:
# (a) all elements of x are positive (no nonneg constraints activated)
# (b) ∂f/∂x = A' * (b - A*x) > 0 for all nonpositive elements of x
iter = 0
while sum(P) < n && any(w[(!).(P)] .> tol) && iter < max_iter
# find i that maximizes w, restricting i to indices not in P
# Note: the while loop condition guarantees at least one w[~P]>0
i = argmax(w .* (!).(P))
# Move i to P
P[i] = true
# Solve least-squares problem, with zeros for columns/elements not in P
s[P] = AtA[P, P] \ Atb[P]
s[(!).(P)] .= zero(eltype(s)) # zero out elements not in P
# Inner loop: deal with negative elements of s
while any(s[P] .<= tol)
iter += 1
# find indices in P where s is negative
ind = @__dot__ (s <= tol) & P
# calculate step size, α, to prevent any xᵢ from going negative
α = minimum(x[ind] ./ (x[ind] - s[ind]))
# update solution (pushes some xᵢ to zero)
x += α * (s - x)
# Remove all i in P where x[i] == 0
for i = 1:n
if P[i] && abs(x[i]) < tol
P[i] = false # remove i from P
end
end
# Solve least-squares problem again, zeroing nonpositive columns
s[P] = AtA[P, P] \ Atb[P]
s[(!).(P)] .= zero(eltype(s)) # zero out elements not in P
end
# update solution
x = deepcopy(s)
w .= Atb - AtA * x
end
return x
end
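# Example: for A = [1 0; 1 1], b = [1, -1], the unconstrained solution [1, -2]
# violates x ≥ 0; fnnls on the Gram system returns the nonnegative optimum.
#   A = [1.0 0.0; 1.0 1.0]; b = [1.0, -1.0]
#   fnnls(A'A, A'b)   # == [0.0, 0.0]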
function fnnls(
A,
B::AbstractMatrix;
gram::Bool = false,
use_parallel::Bool = true,
kwargs...,
)
n = size(A, 2)
k = size(B, 2)
if gram
# A,B are actually Gram matrices
AtA = A
AtB = B
else
# cache matrix computations
AtA = A' * A
AtB = A' * B
end
if use_parallel && nprocs() > 1
X = @distributed (hcat) for i = 1:k
fnnls(AtA, AtB[:, i]; kwargs...)
end
else
X = Array{eltype(B)}(undef, n, k)
for i = 1:k
X[:, i] = fnnls(AtA, AtB[:, i]; kwargs...)
end
end
return X
end
| SigmaRidgeRegression | https://github.com/nignatiadis/SigmaRidgeRegression.jl.git |
["MIT"] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | code | 1930 | abstract type AbstractResponseModel end
"""
random_betas(gr::GroupedFeatures, αs)
Suppose `gr` consists of ``K`` groups with ``p_1, \\dotsc, p_K`` features each.
Then this returns a random vector of βs of length ``\\sum p_g``,
where for `j` in the `g`-th group
we draw (independent) ``β_j \\sim N(0, α_g^2/p_g)``.
``α_g`` is the `g`-th element of the vector `αs`.
"""
function random_betas(gr::GroupedFeatures, αs)
ps = gr.ps
βs = zeros(eltype(αs), gr.p)
for i = 1:gr.num_groups
βs[group_idx(gr, i)] .= randn(ps[i]) .* sqrt(αs[i]^2 / ps[i])
end
βs
end
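# Example: two groups with signal strengths α = (1, 2), so E‖β_g‖² = α_g².
#   grp = GroupedFeatures([100, 400])
#   βs  = random_betas(grp, [1.0, 2.0])
#   group_summary(grp, βs, x -> sum(abs2, x))   # ≈ [1.0, 4.0]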
Base.@kwdef struct RandomLinearResponseModel <: AbstractResponseModel
αs::Vector{Float64}
grp::GroupedFeatures
iid_measure = Normal()
end
function (resp::RandomLinearResponseModel)(X)
β = random_betas(resp.grp, resp.αs) #todo, allow other noise dbn.
Xβ = X * β
Xβ, β
end
Base.@kwdef struct GroupRidgeSimulationSettings{C,R,D}
groups::GroupedFeatures
Σ::C
response_model::R
response_noise::D = Normal()
ntest::Int = 10000
ntrain::Int
iid_measure = Normal()
end
Base.@kwdef struct GroupRidgeSimulation
groups::GroupedFeatures
X::Matrix{Float64}
Y::Vector{Float64}
resampling_idx = nothing
β = nothing
end
function simulate(group_simulation::GroupRidgeSimulationSettings)
ntrain = group_simulation.ntrain
ntest = group_simulation.ntest
ntotal = ntrain + ntest
@unpack response_model, response_noise = group_simulation
X = simulate_rotated_design(
group_simulation.Σ,
ntotal;
rotated_measure = group_simulation.iid_measure,
)
Xβ, β = response_model(X)
Y = Xβ .+ rand(response_noise, ntotal)
resampling_idx = [(1:ntrain, (ntrain+1):ntotal)]
GroupRidgeSimulation(;
groups = group_simulation.groups,
X=X,
Y=Y,
resampling_idx = resampling_idx,
β = β,
)
end
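# Usage sketch: one train/test draw under an identity covariance design.
#   grp = GroupedFeatures([25, 25])
#   settings = GroupRidgeSimulationSettings(
#       groups = grp,
#       Σ = set_groups(IdentityCovarianceDesign(), grp),
#       response_model = RandomLinearResponseModel(αs = [1.0, 2.0], grp = grp),
#       ntrain = 100,
#   )
#   sim = simulate(settings)   # sim.X, sim.Y, sim.resampling_idx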
| SigmaRidgeRegression | https://github.com/nignatiadis/SigmaRidgeRegression.jl.git |
["MIT"] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | code | 1785 | function _integrate_spectrum(h::Float64, γ, λ, f) #interpreted as point mass spectrum
denom = (λ / h + 1 / (1 + γ * f))
1 / denom
end
# h::Distribution: integrate the point-mass formula over a general spectral distribution
function _integrate_spectrum(h::Distribution, γ, λ, f)
expectation(u->_integrate_spectrum(u, γ, λ, f), h)
end
function fixed_point_function(hs, γs, λs)
γ = sum(γs)
fixed_point_f = f -> f - sum(γs ./ γ .* _integrate_spectrum.(hs, γ, λs, f))
find_zero(fixed_point_f, (0.0, 100.0))
end
function risk_formula(hs, γs, αs, λs)
λs = min.(λs, 10_000) #hack for now until properly dealing with Infinity
γ = sum(γs)
fixed_pt = λs_tilde -> fixed_point_function(hs, γs, λs_tilde)
f = fixed_pt(λs)
∇f = grad(central_fdm(5, 1), fixed_pt, λs)[1]
#return ∇f
#return γ ./ γs .* (γs .* λs - αs.^2 .* λs.^2) .* ∇f
1 + γ * f + sum(γ ./ γs .* (γs .* λs - αs .^ 2 .* λs .^ 2) .* ∇f)
end
function r_squared(hs, γs, αs, λs)
response_var = 1 + sum(abs2, αs)
risk = risk_formula(hs, γs, αs, λs)
1 - risk / response_var
end
function optimal_r_squared(αs, γs, hs)
λs_opt = γs ./ αs .^ 2
r_squared(hs, γs, αs, λs_opt)
end
function optimal_λs(γs, αs)
γs ./ αs .^ 2
end
function optimal_risk(hs, γs, αs)
λs_opt = optimal_λs(γs, αs)
risk_formula(hs, γs, αs, λs_opt)
end
function optimal_single_λ(γs, αs)
λ_opt = sum(γs) / sum(abs2, αs)
λs_opt = fill(λ_opt, length(γs))
end
function optimal_single_λ_risk(hs, γs, αs)
λs_opt = optimal_single_λ(γs, αs)
risk_formula(hs, γs, αs, λs_opt)
end
function optimal_ignore_second_group_λs(γs, αs)
λ1_opt = γs[1] * (1 + αs[2]^2) / αs[1]^2
λs_opt = [λ1_opt; Inf]
end
function optimal_ignore_second_group_risk(hs, γs, αs)
λs_opt = optimal_ignore_second_group_λs(γs, αs)
risk_formula(hs, γs, αs, λs_opt)
end
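# Example: two groups with point-mass spectra at 1, γ = (1/4, 1/4), α = (1, 1).
#   hs = [1.0, 1.0]; γs = [0.25, 0.25]; αs = [1.0, 1.0]
#   optimal_risk(hs, γs, αs)                      # optimally tuned per-group λs
#   optimal_single_λ_risk(hs, γs, αs)             # one shared λ for both groups
#   optimal_ignore_second_group_risk(hs, γs, αs)  # second group penalized to zero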
| SigmaRidgeRegression | https://github.com/nignatiadis/SigmaRidgeRegression.jl.git |
["MIT"] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | code | 263 | function _prod_diagonals!(Y, A, B)
@inbounds for j ∈ 1:size(A, 1)
Y[j] = 0
@inbounds for i ∈ 1:size(A, 2)
Y[j] += A[j, i] * B[i, j]
end
end
Y
end
function random_rotation(p)
mat = randn(p, p)
qr(mat).Q
end
| SigmaRidgeRegression | https://github.com/nignatiadis/SigmaRidgeRegression.jl.git |
["MIT"] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | code | 1394 | export CLLData
"""
CLLData
A dataset of different omic measurements for Chronic lymphocytic leukaemia (CLL)
patient samples. The data can be loaded via:
```
cll_data = CLLData.load()
```
`cll_data` is a named tuple with fields:
* `X`: The features.
* `y`: The response, namely Ibrutinib sensitivity.
* `ngr`: The number of features in each of the three feature groupings, namely
drug sensitivity, methylation and RNAseq data.
* `foldid`: A `Vector{Int}` with values in 1,..,10 that assign each of the rows of `X` to
a fold to be used in cross-validation.
## References
The dataset was originally published in:
Dietrich, Sascha, et al. "Drug-perturbation-based stratification of blood cancer."
The Journal of clinical investigation 128.1 (2018): 427-445.
It was used in the context of side-information by:
Velten, Britta, and Wolfgang Huber.
"Adaptive penalization in high-dimensional regression and classification
with external covariates using variational Bayes."
Biostatistics (2019).
The `foldid` assignment into folds is the same as the one used by the above publication.
The dataset was copied from the Bioconductor MOFAdata package, available at:
https://bioconductor.org/packages/release/data/experiment/html/MOFAdata.html
"""
module CLLData
using JLD2
const DATA = joinpath(@__DIR__, "cll_data.jld2")
function load()
JLD2.@load DATA cll_data
cll_data
end
end
| SigmaRidgeRegression | https://github.com/nignatiadis/SigmaRidgeRegression.jl.git |
["MIT"] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | code | 7394 | using Plots
using LaTeXStrings
using CategoricalArrays
using GLM
using Distributions
using SigmaRidgeRegression
using LinearAlgebra
using StatsBase
using Random
# Exploratory calls relying on helpers (whiten_covariates, estimate_var,
# DickerMoments) that are not defined in this package; kept commented out.
# tmp_X = whiten_covariates(X, Σ_chol)
# cov(tmp_X)
# estimate_var(DickerMoments(), X, Y; Σ = Σ_chol)
using ForwardDiff
#gr = GroupedFeatures(repeat([200],5))
Random.seed!(1)
σ = 5.0
grp = GroupedFeatures([30;30;30;30;30;30])
n = 400
p = grp.p
ρ = 0.7
Σ = [ρ^(abs(i-j)) for i=1:p,j=1:p]
Σ_chol = cholesky(Σ)
X = randn(n, p) * Σ_chol.UL
Xnew = randn(10000, p) * Σ_chol.UL
using Statistics
cov(X)
#X= randn(n, p) .+ randn(n)
αs = sqrt.(range(4.0, 12.0, length = ngroups(grp))) # alternatives tried: [2.0, 2.75, 3.5]; 1.0:5.0
β = random_betas(grp, αs)
group_summary(grp, β, norm)
sum(abs2, β)
Y = X*β .+ σ .* randn(n)
tmp = BasicGroupRidgeWorkspace(X=X, Y=Y, groups=grp)
λωλας_λ(tmp)
StatsBase.fit!(tmp, λωλας_λ(tmp))
mom = MomentTunerSetup(tmp)
σ_squared_max(mom)
# Tune σ² by minimizing the LOO error along the moment-based λ(σ²) path
# (reconstruction: tune_σ and opt_res were referenced below but never defined).
using Optim
tune_σ(σ_squared) = fit!(tmp, get_λs(mom, σ_squared))
opt_res = optimize(tune_σ, 0.0, σ_squared_max(mom))
min1 = opt_res.minimizer
lambda_min1 = get_λs(mom, min1)
min_val1 = opt_res.minimum
fit!(tmp, lambda_min1)
β1 = copy(tmp.β_curr)
function tune_λ(λ)
fit!(tmp, λ)
end
lower_box_constraint = fill(0.0, 6)
upper_box_constraint = fill(Inf, 6)
opt_res2 = optimize(tune_λ,
lower_box_constraint, upper_box_constraint,
lambda_min1)
opt_res2.minimizer
opt_res2.minimum
fit!(tmp, opt_res2.minimizer)
β2 = copy(tmp.β_curr)
mean(abs2, X*(β-β1))
mean(abs2, X*(β-β2))
opt_res3 = optimize(tune_λ,
lower_box_constraint, upper_box_constraint,
fill(1.0,6))
opt_res3.minimizer
#opt_res3.minimum
opt_res.minimizer
oracle_λ = σ^2 ./ αs.^2 .* 30 ./ n
scatter( αs.^2, SigmaRidgeRegression.get_αs_squared(mom,1.0))
plot!(αs.^2,αs.^2)
σs_squared = range(0.01, 3.0; length=100)
mypath1 = sigma_squared_path(tmp, mom, σs_squared)
plot(σs_squared, mypath1.loos)
using Plots
plot(σs_squared, mypath1.λs)
λs = vcat([get_λs(mom, s)' for s in σs_squared]...)
pl = plot(σs_squared, λs)
σs_squared = range(0.01, 50.0; length=100)
mypath = sigma_squared_path(tmp, mom, σs_squared)
plot(σs_squared, mypath.loos)
four_cols_rep = hcat([fill(col, 1, 30) for col in ["#440154"; "#31688E"; "#35B779"]]...)
linetype_rep = hcat([fill(col, 1, 25) for col in [:dash,:dot,:dashdot, :solid]]...)
[:black,"#9818d6","#ff5151"]
#66c2a5
#fc8d62
#8da0cb
#e78ac3
four_cols_rep = hcat([fill(col, 1, 30) for col in [:black; :purple; :green]]...)
plot(σs_squared, mypath.βs, alpha=0.8, linewidth=1.0,label="", color=four_cols_rep, ylim=(-2,2))
using Plots
using PlotThemes
theme(:default)
opt_λ_empirical = σ^2/norm(β)^2*γ
opt_λ = σ^2/20.0*p/n
p/n
max_σ_squared(tmp)
fit!(tmp, 0.050)
mom = MomentTunerSetup(tmp)
mom.M_squared
#my
find_λs_squared(mom, 0.2)
find_αs_squared(mom, 1.0)
using NonNegLeastSquares
using Plots
using LaTeXStrings
pgfplotsx()
#, ylab=L"\hat{\lambda}(\sigmacv)", xlab=L"\sigmacv", size=(300,200));
pl = plot(σs_squared, get_α, size=(500,400))
savefig(pl, "pl2.tex")
mom.N_norms_squared
find_αs(mom, 2.0)
using NonNegLeastSquares
find_αs(mom, 20.0)
using RandomMatrices
myrot = rand(GaussianHermite{1},n,1)
n_test = 20_000
n = 10_000
p = 1_000
σ = 1.0
γ = p/n
X = randn(n, p) .+ randn(n) #strong positive correlations.
Z = randn(p,p)
Z_qr_Q = Matrix(qr(Z).Q)
my_eigs = [fill(5, 500);fill(1, 500)]
Σ = Z_qr_Q * Diagonal(my_eigs) * Z_qr_Q'
X = real.(Matrix((sqrt(Σ)*randn(p,n))'))
X_qr.Q'*X_qr.Q
Y_test = X_test*β .+ σ .* randn(n_test)
tmp.XtXpΛ
size(tmp.X)
size(tmp.XtXpΛ_div_Xt)
size(tmp.Y)
StatsBase.fit!(tmp, 2.0)
diag(tmp.X*inv(tmp.XtX + Diagonal(group_expand(tmp.groups, tmp.λs))) * tmp.X')
tmp.leverage_store
hat_matrix = tmp.X*inv(tmp.XtX + Diagonal(group_expand(tmp.groups, tmp.λs))) * tmp.X' ./ n
hat_matrix*tmp.Y ≈ tmp.X * tmp.β_curr
diag(hat_matrix) ≈ tmp.leverage_store
mean(diag(hat_matrix))
mean(tmp.leverage_store)
using ForwardDiff
using Zygote
Zygote.gradient(λ->fit!(tmp, λ), fill(1.0,6))
ForwardDiff.gradient(λ->fit!(tmp, λ), fill(1.0,6))
#ForwardDiff.Hess
loo_error(tmp)
mse_ridge(tmp, X_test, Y_test)
λs = range(0.00, 3.0; length=50)
mses_hat = zeros(length(λs))
loos_hat = zeros(length(λs))
for (i, λ) in enumerate(λs)
fit!(tmp, λ)
mses_hat[i] = mse_ridge(tmp, X_test, Y_test)
loos_hat[i] = loo_error(tmp)
end
using Plots
using LaTeXStrings
plot(λs, [mses_hat loos_hat], color=["black" "blue"], linestyle=[:solid :dot],
label=["MSE" "LOO"], xlabel=L"\lambda")
plot(λs, mses_hat .- loos_hat)
fits = [solve_ridge(XtX, XtY, X, Y, λ) for λ in λs]
mses_hat = [mse_ridge(X_test, Y_test, fit[:β_hat]) for fit in fits]
loos_hat = [fit[:LOO_error] for fit in fits]
using Plots
using LaTeXStrings
vline!([opt_λ_empirical opt_λ], color=[:green :red])
#true_error =
tmp2 = fit!(tmp, 1.0:5.0)
tmp2.XtXpΛ_chol\(tmp.XtX + Diagonal(group_expand(tmp.groups, 1.0:5.0))) ≈ I
(tmp.XtX + Diagonal(group_expand(tmp.groups, 1.0:5.0)))\tmp.XtY ≈ tmp.β_curr
function BasicGroupRidgeWorkspace(X, Y, groups)
end
mychol = cholesky(XtX)
vs = XtX
ldiv!(XtX, mychol, I)
cholesky(XtX)
isa(vs, AbstractMatrix)
#function rand()
#n_test = 10_000
β = randn(p) .* sqrt(α^2/p)
norm(β)^2
XtY = X'*Y./n
function solve_ridge(XtX::Symmetric, XtY, X, Y, λ; compute_M_matrix=false)
n, p = size(X)
chol_trans_inv = inv(cholesky(XtX + λ*I(p)))
β_hat = chol_trans_inv*XtY
hat_matrix = X*chol_trans_inv*X' ./ n
Y_hat = X*β_hat
LOO_error = norm( (Y .- Y_hat)./ ( 1.0 .- diag(hat_matrix)))^2 / n
res= Dict(:β_hat => β_hat, :LOO_error => LOO_error, :λ => λ)
if compute_M_matrix
M_matrix = chol_trans_inv * XtX
N_matrix = chol_trans_inv * X'./n
res[:M_matrix] = M_matrix
res[:N_matrix] = N_matrix
end
res
end
P_mat = ridge_sol[:N_matrix]
sol_matrix
sol_rhs
β_hat_norms
α_squared_hat = sol_matrix\sol_rhs
#matrix_sol =
# want a fun:
# FeatureGroups
# repeat(..., FeatureGroups)
# + iterator protocol for groups.
# groupwise(Groups(), \beta::Vector, )
# groupwise(Groups(), \beta::Matrix, )
# groupwise(Groups(), \beta::Matrix, )
a = reshape(Vector(1:16), (4,4))
@which reduce(max, a, dims=1)
Σ = [ρ^(abs(i-j)) for i=1:p,j=1:p]
myinv = inv(Σ_chol.UL)
myinv*Σ*myinv'
X = randn(n, p) * Σ_chol.L
#woodbury play
using WoodburyMatrices
#A, B, D
bla = copy(tmp.λs)
bla[6] = Inf
A_tmp = Diagonal(group_expand(tmp.groups, bla))
wd = SymWoodbury(A_tmp, X', I(n)/n)
wd.Dp
wd.B
Dp = inv(n*I + wd.B'*(A_tmp\wd.B))
wd.Dp ≈ Dp
#Dp = safeinv(safeinv(D) .+ B'*(A\B))
#?Woodbury
Random.seed!(100)
σ = 4.0
grp = GroupedFeatures([300;3000;5000])
n = 400
p = grp.p
X = randn(n, p)# * Σ_chol.UL
αs = sqrt.([4.0;8.0;12.0]) #range(2.0, 2.75, 3.5; length=3) #1.0:5.0
β = random_betas(grp, αs)
group_summary(grp, β, norm)
sum(abs2, β)
Y = X*β .+ σ .* randn(n)
tmp = BasicGroupRidgeWorkspace(X=X, Y=Y, groups=grp,
XtXpΛ_chol = WoodburyRidgePredictor(X))
fit!(tmp, λωλας_λ(tmp))
mom = MomentTunerSetup(tmp)
#scatter( αs.^2, SigmaRidgeRegression.get_αs_squared(mom,1.0))
#plot!(αs.^2,αs.^2)
σs_squared1 = range(0.0001, 34; length=30)
mypath1 = sigma_squared_path(tmp, mom, σs_squared1)
#with gr
plot(sqrt.(σs_squared1), mypath1.loos)
get_λs(mom, 4) | SigmaRidgeRegression | https://github.com/nignatiadis/SigmaRidgeRegression.jl.git |
|
[
"MIT"
] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | code | 1400 | using SigmaRidgeRegression
using Test
using LinearAlgebra
using Random
import StatsBase
@testset "Woodbury and Cholesky" begin
Random.seed!(1)
σ = 5.0
grp = GroupedFeatures([30;30;30;30;30;30])
n = 400
p = grp.p
X = randn(n, p)
αs = sqrt.(range(4.0, 12.0, length=ngroups(grp))) #range(2.0, 2.75, 3.5; length=3) #1.0:5.0
β = random_betas(grp, αs)
Y = X*β .+ σ .* randn(n)
tmp_chol = BasicGroupRidgeWorkspace(X=X, Y=Y, groups=grp)
tmp_woodb = BasicGroupRidgeWorkspace(X=X, Y=Y, groups=grp,
XtXpΛ_chol = WoodburyRidgePredictor(X))
@test SigmaRidgeRegression.λωλας_λ(tmp_chol) ≈ SigmaRidgeRegression.λωλας_λ(tmp_woodb)
beta_chol = tmp_chol.XtXpΛ_chol \ tmp_woodb.XtY
beta_wdb = tmp_woodb.XtXpΛ_chol \ tmp_woodb.XtY
@test beta_chol ≈ beta_wdb
tmp_chol.XtXpΛ_chol \ tmp_chol.X' ≈ tmp_woodb.XtXpΛ_chol \ tmp_woodb.X'
ldiv_chol = ldiv!(tmp_chol.XtXpΛ_div_Xt, tmp_chol.XtXpΛ_chol, tmp_chol.X')
ldiv_wdb = ldiv!(tmp_woodb.XtXpΛ_div_Xt, tmp_woodb.XtXpΛ_chol, tmp_woodb.X')
@test ldiv_chol ≈ ldiv_wdb
loo_chol = StatsBase.fit!(tmp_chol, SigmaRidgeRegression.λωλας_λ(tmp_chol))
loo_wdb = StatsBase.fit!(tmp_woodb, SigmaRidgeRegression.λωλας_λ(tmp_chol))
@test tmp_woodb.β_curr ≈ tmp_chol.β_curr
end
include("test_mmi.jl")
#include("test_grouplasso.jl")
include("test_covariance.jl")
| SigmaRidgeRegression | https://github.com/nignatiadis/SigmaRidgeRegression.jl.git |
|
[
"MIT"
] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | code | 566 | using SigmaRidgeRegression
using LinearAlgebra
using Test
tmp_block = BlockCovarianceDesign([IdentityCovarianceDesign(), IdentityCovarianceDesign(missing)], missing)
id = IdentityCovarianceDesign()
groups = GroupedFeatures([200;200])
@test set_groups(id, 400) == set_groups(id, groups)
instantiated_block = set_groups(tmp_block, groups)
bla = simulate_rotated_design(instantiated_block, 20)
@test size(bla) == (20,400)
instantiated_block.blocks[1]
spectrum(instantiated_block)
cov1 = SigmaRidgeRegression.UniformScalingCovarianceDesign(p=100, scaling=2.5)
| SigmaRidgeRegression | https://github.com/nignatiadis/SigmaRidgeRegression.jl.git |
|
[
"MIT"
] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | code | 3893 | using RCall
using SigmaRidgeRegression
using LinearAlgebra
import StatsBase:fit
using StatsBase
using Random
using Test
using MLJ
using Plots
Random.seed!(1)
n = 100
p = 80
X = randn(n, p)
Xtable = MLJ.table(X);
βs = randn(p)./sqrt(p)
Y = X*βs .+ randn(n)
groups = GroupedFeatures([30;30;20])
glasso = GroupLassoRegressor(groups=groups)
glasso_machine = machine(glasso, X, Y)
fit!(glasso_machine)
λ_max = SigmaRidgeRegression._default_hyperparameter_maximum(glasso, glasso_machine)
glasso_machine.model.λ = λ_max
fit!(glasso_machine)
group_summary(groups, glasso_machine.fitresult.coef, norm)
group_index = group_expand(groups, Base.OneTo(ngroups(groups)))
R"library(gglasso)"
@rput X
@rput Y
@rput group_index
@rput p
R"gglasso_fit_all <- gglasso(X, Y, group=group_index, intercept=FALSE)"
R"lambda_max <- max(gglasso_fit_all$lambda)"
@rget lambda_max
@test lambda_max .*sqrt(p) ≈ λ_max
for λ in [0.001; 0.1; 0.5; 1.0; λ_max/2]
glasso_machine.model.λ = λ
fit!(glasso_machine)
R"gglasso_fit <- gglasso(X, Y, group=group_index, lambda=$λ / sqrt(p), intercept=FALSE)"
R"beta <- gglasso_fit$beta"
@rget beta
beta = vec(beta)
@test group_summary(groups, beta, norm) ≈ group_summary(groups, fitted_params(glasso_machine).fitresult.coef, norm) atol =0.005
@test norm( beta .- fitted_params(glasso_machine).fitresult.coef, Inf) < 0.005
end
Xstand = StatsBase.transform(StatsBase.fit(StatsBase.ZScoreTransform, X; dims=1), X)
Ystand = Y .- mean(Y)
@rput Xstand
@rput Ystand
R"gglasso_fit1 <- gglasso(Xstand, Ystand, group=group_index, lambda=0.02, intercept=FALSE)"
R"gpreg_fit1 <- grpreg(Xstand, Ystand, group=group_index, lambda=0.01)"
R"as.vector(predict(gglasso_fit1, Xstand))"
R"predict(gpreg_fit1, Xstand)"
MLJ.predict(glasso_machine)
# Now check CVGGLasso code
Xstand = StatsBase.transform(StatsBase.fit(StatsBase.ZScoreTransform, X; dims=1), X)
Ystand = Y .- mean(Y)
cvgglasso = CVGGLassoRegressor(groups=groups)
cvgglasso_machine = machine(cvgglasso, Xstand, Ystand)
fit!(cvgglasso_machine)
cvgglasso_machine.report.param_max
cvgglasso_machine.report.tmp_intercept
multiridge = MultiGroupRidgeRegressor(;groups=groups, λs=cvgglasso_machine.report.best_λs)
multiridge_machine = machine(multiridge, Xstand, Ystand)
fit!(multiridge_machine)
@test predict(multiridge_machine) ≈ predict(cvgglasso_machine) atol =0.01
new_X = randn(2,p)
@test predict(multiridge_machine, new_X) ≈ predict(cvgglasso_machine, new_X) atol =0.01
cvgglasso_machine.report.best_param
grpreglasso = CVGGLassoRegressor(groups=groups, engine=:grpreg)
grpreglasso_machine = machine(grpreglasso, Xstand, Ystand)
fit!(grpreglasso_machine)
grpreglasso_machine.report.best_param
grpreglasso_machine.report.param_max
grpreglasso_machine.report.tmp_intercept
loo_glasso = LooRidgeRegressor(ridge = glasso)
loo_glasso_machine = machine(loo_glasso, X, Y)
fit!(loo_glasso_machine)
loo_glasso_machine.report.best_λs
cv_glasso = TunedRidgeRegressor(ridge = glasso, resampling=CV(nfolds=5,shuffle=true, rng=1))
cv_glasso_machine = machine(cv_glasso, X, Y)
fit!(cv_glasso_machine)
cv_glasso_machine.report.best_param
cv_glasso_machine.report.best_λs
loo_list = loo_glasso_machine.report.loos
λ = loo_glasso_machine.report.best_model.λ
using Plots
plot(loo_glasso_machine.report.params, loo_list, xscale=:log10)
λ_path = vcat(loo_glasso_machine.report.λs'...)
plot(λ , λ_path, xscale=:log10, yscale=:log10)
#ps = fill(50, 10)
#n = 200
#grp = GroupedFeatures(ps)
#design = IdentityCovarianceDesign(grp.p)
#αs = vcat(fill(0.0, 5), fill(3.0,5))
#ridge_sim= GroupRidgeSimulationSettings(grp = grp,
# ntrain= n,
# Σ = design,
# response_model = RandomLinearResponseModel(αs = αs, grp=grp))
#sim_res = simulate(ridge_sim)
Random.seed!(1)
n = 1000
p = 800
X = randn(n, p)
Xtable = MLJ.table(X);
βs = randn(p)./sqrt(p)
Y = X*βs .+ randn(n)
groups = GroupedFeatures([300;300;200])
cvgglasso = CVGGLassoRegressor(groups=groups, eps=1e-4)
cvgglasso_machine = machine(cvgglasso, X, Y)
@time fit!(cvgglasso_machine)
| SigmaRidgeRegression | https://github.com/nignatiadis/SigmaRidgeRegression.jl.git |
|
[
"MIT"
] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | code | 7471 | using MLJLinearModels
using MLJ
using MLJModelInterface
import StatsBase
using SigmaRidgeRegression
using Test
using Random
using Plots
const MMI = MLJModelInterface
# Mostly here to check implementation.
# Let us first test if the code works for a single predictor
single_group_ridge_reg = SingleGroupRidgeRegressor(decomposition=:cholesky, λ=0.0, center=false, scale=false)
single_group_ridge_reg_woodbury = SingleGroupRidgeRegressor(decomposition=:woodbury, λ=0.0, center=false, scale=false)
mljlm_ridge = RidgeRegressor(lambda=0.0, fit_intercept=false)
Random.seed!(1)
n = 100
p = 80
X = randn(n, p)
Xtable = MLJ.table(X);
βs = randn(p)./sqrt(p)
Y = X*βs .+ randn(n)
grps = GroupedFeatures([p]);
single_group_ridge_machine = machine(single_group_ridge_reg, Xtable, Y)
single_group_ridge_woodbury_machine = machine(single_group_ridge_reg_woodbury, Xtable, Y)
mljlm_ridge_machine = machine(mljlm_ridge, Xtable, Y)
fit!(single_group_ridge_machine)
@test_broken fit!(single_group_ridge_woodbury_machine)#cannot handle 0.0
fit!(mljlm_ridge_machine)
@test predict(single_group_ridge_machine) ≈ predict(mljlm_ridge_machine)
single_group_ridge_machine.model.λ = 1.0
single_group_ridge_woodbury_machine.model.λ = 1.0
mljlm_ridge_machine.model.lambda = 1.0 * n
fit!(single_group_ridge_machine)
fit!(single_group_ridge_woodbury_machine)
fit!(mljlm_ridge_machine)
@test predict(single_group_ridge_machine) ≈ predict(mljlm_ridge_machine)
@test predict(single_group_ridge_machine) ≈ predict(single_group_ridge_woodbury_machine)
# check above with scaling/centering
for scale in [false]
for decomposition in [:cholesky; :woodbury]
@show scale, decomposition
Yshift = Y .+ 10.0
Y_center = Yshift .- mean(Yshift)
X_center_transform = StatsBase.fit(StatsBase.ZScoreTransform, X; dims=1,scale=scale, center=true)
X_center = StatsBase.transform(X_center_transform, X)
single_group_ridge_reg_centered = SingleGroupRidgeRegressor(decomposition=decomposition, λ=1.0, center=true, scale=scale)
single_group_ridge_reg_centered_machine = machine(single_group_ridge_reg_centered, X, Yshift)
fit!(single_group_ridge_reg_centered_machine)
single_group_ridge_reg_tmp = SingleGroupRidgeRegressor(decomposition=decomposition, λ=1.0, center=false, scale=false)
single_group_ridge_machine_centered_data = machine(single_group_ridge_reg_tmp, X_center, Y_center)
fit!(single_group_ridge_machine_centered_data)
@test predict(single_group_ridge_reg_centered_machine) == predict(single_group_ridge_machine_centered_data) .+ mean(Yshift)
end
end
# .+ mean(Y)
# Start checking LOOCVRidgeRegressor
loocv_ridge = LooRidgeRegressor(; ridge=SingleGroupRidgeRegressor(;scale=false, center=false))
loocv_ridge_machine = machine(loocv_ridge, X, Y)
@time fit!(loocv_ridge_machine)
λ_max = loocv_ridge_machine.report.param_max
λ_range = loocv_ridge_machine.report.param_range
## Compare against brute force predictions
loocv_ridge_bruteforce = TunedModel(model = single_group_ridge_reg,
tuning = Grid(resolution=loocv_ridge.tuning.resolution),
resampling= CV(nfolds=n),
measure = l2,
range = λ_range)
loocv_ridge_bruteforce_machine = machine(loocv_ridge_bruteforce, X,Y)
@time fit!(loocv_ridge_bruteforce_machine)
loos_brute = [h.measurement[1] for h in loocv_ridge_bruteforce_machine.report.history]
λs_brute = [h.model.λ for h in loocv_ridge_bruteforce_machine.report.history]
plot(λs_brute, loos_brute; seriestype=:scatter, xscale=:log10)
plot!(loocv_ridge_machine.report.params, loocv_ridge_machine.report.loos)
@test loocv_ridge_machine.report.best_param ≈ loocv_ridge_bruteforce_machine.report.best_model.λ
@test loocv_ridge_machine.fitresult.coef == loocv_ridge_bruteforce_machine.fitresult.fitresult.coef
@test predict(loocv_ridge_machine) == predict(loocv_ridge_bruteforce_machine)
Xnew = MLJ.table(randn(10, p));
@test predict(loocv_ridge_machine, Xnew) == predict(loocv_ridge_bruteforce_machine, Xnew)
## visualize
plot(loocv_ridge_machine.report.params, loocv_ridge_machine.report.loos, xscale=loocv_ridge_machine.model.tuning.scale, label="loo shortcut")
vline!([loocv_ridge_machine.report.best_param])
single_ridge_cv_curve_loo = learning_curve(single_group_ridge_machine, range=λ_range, resampling=CV(nfolds=n), measure=l2)
plot!(single_ridge_cv_curve_loo.parameter_values,
single_ridge_cv_curve_loo.measurements,
xlab=single_ridge_cv_curve_loo.parameter_name,
xscale=single_ridge_cv_curve_loo.parameter_scale,
label = "LOO brute force")
# Let us also try with other number of folds
single_ridge_cv = TunedModel(model = single_group_ridge_reg,
tuning = Grid(resolution=100),
resampling= CV(nfolds=5),
measure = l2,
range = λ_range)
single_ridge_cv_machine = machine(single_ridge_cv, Xtable, Y)
single_ridge_cv_curve_5fold = learning_curve(single_group_ridge_machine, range=λ_range, resampling=CV(nfolds=5), measure=l2)
plot!(single_ridge_cv_curve_5fold.parameter_values,
single_ridge_cv_curve_5fold.measurements,
xlab=single_ridge_cv_curve_5fold.parameter_name,
xscale=single_ridge_cv_curve_5fold.parameter_scale,
label = "5-fold",
ylab = "CV estimate of RMS error")
tmp_eval = evaluate!(single_group_ridge_machine, resampling=CV(nfolds=n), measure=l2)
@test tmp_eval.measurement[1] ≈ loo_error(single_group_ridge_machine.cache) atol=0.02
# Check multiridge
multiridge = MultiGroupRidgeRegressor(;groups=GroupedFeatures([30;50]), center=false, scale=false)
loocv_multiridge = LooRidgeRegressor(ridge=multiridge, tuning=SigmaRidgeRegression.DefaultTuning(resolution=10))
loocv_multiridge_mach = machine(loocv_multiridge, X, Y)
fit!(loocv_multiridge_mach)
multiridge_ranges = loocv_multiridge_mach.report.param_range
multiridge_loo_bruteforce = TunedModel(model=multiridge,
resampling=CV(nfolds=n),
tuning=Grid(resolution=loocv_multiridge.tuning.resolution),
range=multiridge_ranges,
measure=l2)
multiridge_loo_bruteforce_machine = machine(multiridge_loo_bruteforce, X, Y)
fit!(multiridge_loo_bruteforce_machine)
@test values(multiridge_loo_bruteforce_machine.report.best_model.λs) == values(loocv_multiridge_mach.report.best_param)
@test predict(multiridge_loo_bruteforce_machine) == predict(loocv_multiridge_mach)
# test SigmaRidgeRegression
groups = GroupedFeatures([30;50])
sigmaridge = SigmaRidgeRegressor(;groups=groups, σ=1.0, scale=false, center=false)
sigmaridge_machine = machine(sigmaridge, X, Y)
fit!(sigmaridge_machine)
sigmaridge_machine.cache.workspace.λs
predict(sigmaridge_machine)
loo_sigmaridge = LooRidgeRegressor(;ridge=sigmaridge, tuning=DefaultTuning(scale=:linear))
loo_sigmaridge_machine = machine(loo_sigmaridge, X, Y)
fit!(loo_sigmaridge_machine)
σs = loo_sigmaridge_machine.report.params
loo_σs = loo_sigmaridge_machine.report.loos
plot(σs, loo_σs)
loo_sigmaridge_machine.report.best_param
loo_sigmaridge_machine.report.best_λs
λ_path = vcat(loo_sigmaridge_machine.report.λs'...)
plot(σs, λ_path,xlim=(0,1.5), ylim=(0,20))
| SigmaRidgeRegression | https://github.com/nignatiadis/SigmaRidgeRegression.jl.git |
|
[
"MIT"
] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | docs | 2970 | # SigmaRidgeRegression.jl
<img src="sigmaridge_logo.png" width="205">
[](https://github.com/nignatiadis/SigmaRidgeRegression.jl/actions)
[](https://codecov.io/gh/nignatiadis/SigmaRidgeRegression.jl)
Automatically and optimally tuned ridge regression for settings where the features can be partitioned into groups.
See the manuscript below for a theoretical description of the method.
> Ignatiadis, Nikolaos, and Panagiotis Lolas. "Group-regularized ridge regression via
empirical Bayes noise level cross-validation." [arXiv:2010.15817](https://arxiv.org/abs/2010.15817) (2020+)
The folder `reproduction_code` in this repository contains code to reproduce the results of the paper.
## Installation
The package is available on the Julia registry (for Julia version 1.5) and may be installed as follows:
```julia
using Pkg
Pkg.add("SigmaRidgeRegression")
```
## Example usage
SigmaRidgeRegression.jl can be used alongside the [MLJ](https://github.com/alan-turing-institute/MLJ.jl) framework for machine learning in Julia.
```julia
using SigmaRidgeRegression, MLJ, Random, Statistics

# Suppose we have three groups of features, each with n observations
# and 25, 50 and 100 features respectively
n = 400
Random.seed!(1)
p1 = 25 ; X1 = randn(n, p1)
p2 = 50 ; X2 = randn(n, p2)
p3 = 100; X3 = randn(n, p3)
# The signal strength of the regression coefficients varies across these groups
α1_sq = 4.0 ; βs1 = randn(p1) .* sqrt(α1_sq / p1)
α2_sq = 8.0 ; βs2 = randn(p2) .* sqrt(α2_sq / p2)
α3_sq = 12.0; βs3 = randn(p3) .* sqrt(α3_sq / p3)
# Let us concatenate the results and create a response
X = [X1 X2 X3]
βs = [βs1; βs2; βs3]
σ = 4.0
Y = X*βs .+ σ .* randn(n)
# Let us make a `GroupedFeatures` object that describes the feature grouping
# !!NOTE!! Right now the features are expected to be ordered consecutively in groups
# i.e., the first p1 features belong to group 1 etc.
groups = GroupedFeatures([p1;p2;p3])
# Create MLJ machine and fit SigmaRidgeRegression:
sigma_model = LooSigmaRidgeRegressor(;groups=groups)
mach_sigma_model = machine(sigma_model, X, Y)
fit!(mach_sigma_model)
# How well are we estimating the true X*βs in mean squared error?
mean(abs2, X*βs .- predict(mach_sigma_model)) # 4.612726430034071
# In this case we may compare also to the Bayes risk
λs_opt = σ^2 ./ [α1_sq; α2_sq; α3_sq] .* groups.ps ./n
bayes = MultiGroupRidgeRegressor(;groups=groups, λs=λs_opt, center=false, scale=false)
mach_bayes = machine(bayes, X, Y)
fit!(mach_bayes)
mean(abs2, X*βs .- predict(mach_bayes)) #4.356913540118585
```
### TODOs
* Fully implement the MLJ interface.
* Wait for the following MLJ issue to be fixed: https://github.com/alan-turing-institute/MLJBase.jl/issues/428#issuecomment-708141459; in the meantime, this package uses type piracy (as in the comments there) to accommodate the large number of features.
|
[
"MIT"
] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | docs | 92 | # SigmaRidgeRegression.jl
```@index
```
```@autodocs
Modules = [SigmaRidgeRegression]
```
| SigmaRidgeRegression | https://github.com/nignatiadis/SigmaRidgeRegression.jl.git |
|
[
"MIT"
] | 0.2.0 | e0510282ebde7e9e1ca6ffe9dc255a110ee657f6 | docs | 1157 | # Reproduction code
## Setup
Start the `Julia` REPL (Julia version 1.5) in this folder and type `]` to activate the package manager. Then type and enter
```julia
activate .
```
followed by
```julia
instantiate
```
This will automatically install all required Julia dependencies.
For the `cll.jl` file, an R installation with the `MOFAdata` package is also required (R is called through `RCall`).
This package may be installed from within `R` as follows:
```r
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
BiocManager::install("MOFAdata")
```
## File description
* `motivation.jl`: Code to reproduce Figures 1 and 2.
* `oracle_risks.jl`: Code to reproduce Figures 4 and S1.
* `cll.jl`: Code to reproduce Tables 1 and 2.
* `million_songs.jl`: Code to reproduce Figures 5 and 6.
* `simulations.jl` and `simulations_plots.jl`: Code to reproduce Figure 7 of the manuscript. Note that you call `simulations.jl` for example via `julia simulations.jl 1` (and similarly up to `6`) and this generates files in `simulation_results`. These results are then loaded by `simulations_plots.jl`. | SigmaRidgeRegression | https://github.com/nignatiadis/SigmaRidgeRegression.jl.git |
|
[
"MIT"
] | 0.3.0 | 05e3b02540f98940ffa775d46860a48259372b6e | code | 807 | using Documenter, AllanDeviations
makedocs(
format = :html,
sitename = "AllanDeviations.jl",
authors = "Julien Kluge",
pages = [
"Home" => "index.md",
"Installation Guide" => "installation.md",
"Quick Start Guide" => "quickstart.md",
"Library" => Any[
"API guide" => "lib/apiguide.md",
"Allan Deviation" => "lib/allandev.md",
"Modified Allan Deviation" => "lib/mallandev.md",
"Hadamard Deviation" => "lib/hadamarddev.md",
"Time Deviation" => "lib/timedev.md",
"Total Deviation" => "lib/totaldev.md",
"Max. time interval error" => "lib/mtie.md"
]
]
)
deploydocs(
repo = "github.com/JulienKluge/AllanDeviations.jl.git",
target = "build",
osname = "linux",
julia = "nightly",
deps = nothing,
make = nothing
)
| AllanDeviations | https://github.com/JulienKluge/AllanDeviations.jl.git |
|
[
"MIT"
] | 0.3.0 | 05e3b02540f98940ffa775d46860a48259372b6e | code | 2535 | module AllanDeviations
#
# Exports/
#
export AllanTauDescriptor, AllTaus, QuarterOctave, HalfOctave, Octave, HalfDecade, Decade
export allandev
export mallandev
export hadamarddev
export timedev
export totaldev
export mtie
#
# /Exports
#
#
# Types/
#
abstract type AllanTauDescriptor end
struct AllTaus <: AllanTauDescriptor end
struct QuarterOctave <: AllanTauDescriptor end
struct HalfOctave <: AllanTauDescriptor end
struct Octave <: AllanTauDescriptor end
struct HalfDecade <: AllanTauDescriptor end
struct Decade <: AllanTauDescriptor end
#
# /Types
#
#
# Helper Functions/
#
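#`frequencytophase` integrates fractional-frequency samples into phases:
#phase[i + 1] = (freq[1] + … + freq[i]) / rate, with phase[1] = 0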
function frequencytophase(data::AbstractArray{T, 1}, rate::AbstractFloat) where T
dt = 1 / rate
n = length(data) + 1
dataPrime = zeros(T, n)
walkingSum = zero(T)
@inbounds for i in 2:n #leave the first element at zero so that the phase starts at zero
walkingSum += data[i - 1]
dataPrime[i] = walkingSum * dt
end
dataPrime
end
#tau-descriptor to m
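#each descriptor maps to a list of averaging factors `m` (in samples);
#the corresponding averaging time is τ = m / rate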
function taudescription_to_m(::Type{AllTaus}, rate::AbstractFloat, n::Int)
1:(n - 2)
end
function taudescription_to_m(::Type{Decade}, rate::AbstractFloat, n::Int)
10 .^(0:Int(floor(log10(n))))
end
function taudescription_to_m(::Type{HalfDecade}, rate::AbstractFloat, n::Int)
5 .^(0:Int(floor(log(5.0, n))))
end
function taudescription_to_m(::Type{Octave}, rate::AbstractFloat, n::Int)
2 .^(0:Int(floor(log2(n))))
end
function taudescription_to_m(::Type{HalfOctave}, rate::AbstractFloat, n::Int)
unique(Int.(floor.(
1.5 .^(1:Int(floor(log(1.5, n))))
)))
end
function taudescription_to_m(::Type{QuarterOctave}, rate::AbstractFloat, n::Int)
unique(Int.(floor.(
1.25 .^(1:Int(floor(log(1.25, n))))
)))
end
#tau with custom log base value to m
function taudescription_to_m(taus::AbstractFloat, rate::AbstractFloat, n::Int)
if taus <= 1.0
error("Custom `taus`-log scale must be greater than 1.0")
end
unique(Int.(floor.(
taus .^(0:Int(floor(log(taus, n))))
)))
end
#tau with custom array to m
function taudescription_to_m(taus::Array{Float64}, rate::AbstractFloat, n::Int)
m = unique(Int.(floor.(rate .* taus)))
m[m .>= 1]
end
#tau with custom tau-length count
function taudescription_to_m(count::Integer, rate::AbstractFloat, n::Int)
unique(Int.(floor.(
1.125 .^(1:(log(1.125, n) / count):log(1.125, n))
)))
end
#
# /Helper Functions
#
#
# Exported functions/
#
include("dev_allan.jl")
include("dev_mallan.jl")
include("dev_hadamard.jl")
include("dev_time.jl")
include("dev_total.jl")
include("dev_mtie.jl")
#
# /Exported functions
#
end # AllanDeviations
| AllanDeviations | https://github.com/JulienKluge/AllanDeviations.jl.git |
|
[
"MIT"
] | 0.3.0 | 05e3b02540f98940ffa775d46860a48259372b6e | code | 2477 | """
allandev(data, rate; [frequency=false], [overlapping=true], [taus=192])
Calculates the Allan deviation
#parameters:
* `<data>`: The data array to calculate the deviation from, either as phases or frequencies.
* `<rate>`: The rate of the data given.
* `[frequency]`: True if `data` contains frequency data, otherwise (default) phase data is assumed.
* `[overlapping]`: True (default) to calculate the overlapping deviation, false otherwise.
* `[taus]`: Taus to calculate the deviation at. This can either be an `AllanTauDescriptor` type (`AllTaus`, `QuarterOctave`, `HalfOctave`, `Octave`, `HalfDecade`, `Decade`), an array of taus to calculate at, a float number to build a custom log-scale on, or an integer to build a specific number of log-spaced points (default: 192).
#returns: named tuple (tau, deviation, error, count)
* `tau`: Taus which were used.
* `deviation`: Deviations calculated.
* `error`: Respective errors.
* `count`: Number of contributing terms for each deviation.
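#examples:
A minimal sketch on synthetic white phase noise (the variable names are illustrative):
```julia
phase = randn(10_000)                     #phase samples at 1 Hz
result = allandev(phase, 1.0)             #overlapping deviation at the default taus
result = allandev(phase, 1.0, taus = 2.0) #octave (log-2) spaced taus instead
```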
"""
function allandev(
data::AbstractArray{T, 1},
rate::AbstractFloat;
frequency::Bool = false,
overlapping::Bool = true,
taus::Union{Type{U}, Integer, AbstractFloat, Array{Float64}} = 192) where {T, U <: AllanTauDescriptor}
#frequency to phase calculation
if frequency
data = frequencytophase(data, rate)
end
n = length(data)
if n < 3
error("Length for `data` in allandev must be at least 3 or greater")
end
#tau calculations
m = taudescription_to_m(taus, rate, n)
dev = zeros(T, length(m)) #allandeviation
deverr = zeros(T, length(m)) #allandeviation error
devcount = zeros(Int, length(m)) #sum term count
mStride = 1 #overlapping - can be overwritten in loop for consecutive
@inbounds for (index, τ) in enumerate(m)
if !overlapping #overwrite stride for consecutive operation
mStride = τ
end
#allan deviation: http://www.leapsecond.com/tools/adev_lib.c
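#overlapping estimator: σ_y²(τ) = 1 / (2 ⋅ terms ⋅ τ²) ⋅ Σᵢ (x[i + 2m] - 2x[i + m] + x[i])²,
#where the loop variable τ is the averaging factor m in samples and the averaging time is m / rate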
sum = zero(T)
i = 1
terms = 0
while (i + 2 * τ) <= n
v = data[i] - 2 * data[i + τ] + data[i + 2 * τ]
sum += v * v
i += mStride
terms += 1
end
if terms <= 1 #break the tau loop if no contribution with term-count > 1 is done
break
end
dev[index] = sqrt(sum / (2 * terms)) / τ * rate
deverr[index] = dev[index] / sqrt(terms)
devcount[index] = terms
end
selector = devcount .> 1 #select only entries, where 2 or more terms contributed to the deviation
(tau = m[selector] ./ rate, deviation = dev[selector], error = deverr[selector], count = devcount[selector])
end
| AllanDeviations | https://github.com/JulienKluge/AllanDeviations.jl.git |
|
[
"MIT"
] | 0.3.0 | 05e3b02540f98940ffa775d46860a48259372b6e | code | 2518 | """
hadamarddev(data, rate; [frequency=false], [overlapping=true], [taus=192])
Calculates the Hadamard deviation
#parameters:
* `<data>`: The data array to calculate the deviation from, either as phases or frequencies.
* `<rate>`: The rate of the data given.
* `[frequency]`: True if `data` contains frequency data, otherwise (default) phase data is assumed.
* `[overlapping]`: True (default) to calculate the overlapping deviation, false otherwise.
* `[taus]`: Taus to calculate the deviation at. This can either be an `AllanTauDescriptor` type (`AllTaus`, `QuarterOctave`, `HalfOctave`, `Octave`, `HalfDecade`, `Decade`), an array of taus to calculate at, a float number to build a custom log-scale on, or an integer to build a specific number of log-spaced points (default: 192).
#returns: named tuple (tau, deviation, error, count)
* `tau`: Taus which were used.
* `deviation`: Deviations calculated.
* `error`: Respective errors.
* `count`: Number of contributing terms for each deviation.
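#examples:
A minimal sketch on synthetic data with linear frequency drift, which the three-sample differences reject (names are illustrative):
```julia
phase = randn(10_000) .+ 1e-6 .* (1:10_000) .^ 2 #white phase noise plus linear frequency drift
result = hadamarddev(phase, 1.0)
```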
"""
function hadamarddev(
data::AbstractArray{T, 1},
rate::AbstractFloat;
frequency::Bool = false,
overlapping::Bool = true,
taus::Union{Type{U}, Integer, AbstractFloat, Array{Float64}} = 192) where {T, U <: AllanTauDescriptor}
#frequency to phase calculation
if frequency
data = frequencytophase(data, rate)
end
n = length(data)
if n < 5
error("Length for `data` in hadamarddev must be at least 5 or greater")
end
#tau calculations
m = taudescription_to_m(taus, rate, n)
dev = zeros(T, length(m)) #hadamarddeviation
deverr = zeros(T, length(m)) #hadamarddeviation error
devcount = zeros(Int, length(m)) #sum term count
mStride = 1 #overlapping - can be overwritten in loop for consecutive
@inbounds for (index, τ) in enumerate(m)
if !overlapping #overwrite stride for consecutive operation
mStride = τ
end
#hadamard deviation: http://www.leapsecond.com/tools/adev_lib.c
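#estimator: σ_H²(τ) = 1 / (6 ⋅ terms ⋅ τ²) ⋅ Σᵢ (x[i + 3m] - 3x[i + 2m] + 3x[i + m] - x[i])²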
sum = zero(T)
i = 1
terms = 0
while (i + 3 * τ) <= n
v = data[i + 3 * τ] - 3 * data[i + 2 * τ] + 3 * data[i + τ] - data[i]
sum += v * v
i += mStride
terms += 1
end
if terms <= 1 #break the tau loop if no contribution with term-count > 1 is done
break
end
dev[index] = sqrt(sum / (6 * terms)) / τ * rate
deverr[index] = dev[index] / sqrt(terms)
devcount[index] = terms
end
selector = devcount .> 1 #select only entries, where 2 or more terms contributed to the deviation
(tau = m[selector] ./ rate, deviation = dev[selector], error = deverr[selector], count = devcount[selector])
end
| AllanDeviations | https://github.com/JulienKluge/AllanDeviations.jl.git |
|
[
"MIT"
] | 0.3.0 | 05e3b02540f98940ffa775d46860a48259372b6e | code | 2663 | """
mallandev(data, rate; [frequency=false], [overlapping=true], [taus=192])
Calculates the modified Allan deviation
#parameters:
* `<data>`: The data array to calculate the deviation from, either as phases or frequencies.
* `<rate>`: The rate of the data given.
* `[frequency]`: True if `data` contains frequency data, otherwise (default) phase data is assumed.
* `[overlapping]`: True (default) to calculate the overlapping deviation, false otherwise.
* `[taus]`: Taus to calculate the deviation at. This can either be an `AllanTauDescriptor` type (`AllTaus`, `QuarterOctave`, `HalfOctave`, `Octave`, `HalfDecade`, `Decade`), an array of taus to calculate at, a float number to build a custom log-scale on, or an integer to build a specific number of log-spaced points (default: 192).
#returns: named tuple (tau, deviation, error, count)
* `tau`: Taus which were used.
* `deviation`: Deviations calculated.
* `error`: Respective errors.
* `count`: Number of contributing terms for each deviation.
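#examples:
A minimal sketch using fractional-frequency input (synthetic white noise; names are illustrative):
```julia
freq = randn(10_000)                            #frequency samples at 1 Hz
result = mallandev(freq, 1.0, frequency = true) #data is integrated to phases internally
```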
"""
function mallandev(
data::AbstractArray{T, 1},
rate::AbstractFloat;
frequency::Bool = false,
overlapping::Bool = true,
taus::Union{Type{U}, Integer, AbstractFloat, Array{Float64}} = 192) where {T, U <: AllanTauDescriptor}
#frequency to phase calculation
if frequency
data = frequencytophase(data, rate)
end
n = length(data)
if n < 4
error("Length for `data` in mallandev must be at least 4 or greater")
end
#tau calculations
m = taudescription_to_m(taus, rate, n)
dev = zeros(T, length(m)) #allandeviation
deverr = zeros(T, length(m)) #allandeviation error
devcount = zeros(Int, length(m)) #sum term count
mStride = 1 #overlapping - can be overwritten in loop for consecutive
@inbounds for (index, τ) in enumerate(m)
if !overlapping #overwrite stride for consecutive operation
mStride = τ
end
#modified allan deviation: http://www.leapsecond.com/tools/adev_lib.c
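#Mod σ_y²(τ) = 1 / (2 ⋅ terms ⋅ m² ⋅ τ²) ⋅ Σⱼ (Σ_{i = j}^{j + m - 1} (x[i + 2m] - 2x[i + m] + x[i]))²;
#the first inner sum is built explicitly, later ones via a rolling third-difference update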
sum = zero(T)
v = zero(T)
i = 1
while (i + 2 * τ) <= n && i <= τ
v += data[i] - 2 * data[i + τ] + data[i + 2 * τ]
i += mStride
end
sum += v * v
terms = 1
i = 1
while (i + 3 * τ) <= n
v += data[i + 3 * τ] - 3 * data[i + 2 * τ] + 3 * data[i + τ] - data[i]
sum += v * v
i += mStride
terms += 1
end
if terms <= 1 #break the tau loop if no contribution with term-count > 1 is done
break
end
dev[index] = sqrt(sum / (2 * terms)) / (τ * τ) * rate
deverr[index] = dev[index] / sqrt(terms)
devcount[index] = terms
end
selector = devcount .> 1 #select only entries, where 2 or more terms contributed to the deviation
(tau = m[selector] ./ rate, deviation = dev[selector], error = deverr[selector], count = devcount[selector])
end
| AllanDeviations | https://github.com/JulienKluge/AllanDeviations.jl.git |
|
[
"MIT"
] | 0.3.0 | 05e3b02540f98940ffa775d46860a48259372b6e | code | 3602 | #=
##
## TODO: mtie has great performance for phase data but extremely worse performance for frequency data
## Is this due to branch misprediction?
##
=#
"""
mtie(data, rate; [frequency=false], [overlapping=true], [taus=192])
Calculates the maximal time interval error
# parameters:
* `<data>`: The data array to calculate the deviation from, either as phases or frequencies.
* `<rate>`: The rate of the data given.
* `[frequency]`: True if `data` contains frequency data, otherwise (default) phase data is assumed.
* `[overlapping]`: True (default) to calculate the overlapping deviation, false otherwise.
* `[taus]`: Taus to calculate the deviation at. This can either be an `AllanTauDescriptor` type (`AllTaus`, `QuarterOctave`, `HalfOctave`, `Octave`, `HalfDecade`, `Decade`), an array of taus to calculate at, a float number to build a custom log-scale on, or an integer to build a specific number of log-spaced points (default: 192).
# returns: named tuple (tau, deviation, error, count)
* `tau`: Taus which were used.
* `deviation`: Deviations calculated.
* `error`: Respective errors.
* `count`: Number of contributing terms for each deviation.
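# examples:
A minimal sketch on a pure phase ramp, where the error over a window of length τ is exactly τ times the slope (names are illustrative):
```julia
phase = collect(0.1 .* (1:1_000)) #steady phase ramp of 0.1 per sample
result = mtie(phase, 1.0)         #here result.deviation ≈ 0.1 .* result.tau
```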
"""
function mtie(
data::AbstractArray{T, 1},
rate::AbstractFloat;
frequency::Bool = false,
overlapping::Bool = true,
taus::Union{Type{U}, Integer, AbstractFloat, Array{Float64}} = 192) where {T, U <: AllanTauDescriptor}
#frequency to phase calculation
if frequency
data = frequencytophase(data, rate)
end
n = length(data)
if n < 2
error("Length for `data` in mtie must be at least 2 or greater")
end
if !overlapping #warn for consecutive execution
@warn "It is highly unusual to use the mtie in the non overlapping form. Do not use this for definite interpretation or publication."
end
#tau calculations
m = taudescription_to_m(taus, rate, n)
dev = zeros(T, length(m)) #mtie
deverr = zeros(T, length(m)) #mtie error
devcount = zeros(Int, length(m)) #sum term count
mStride = 1 #overlapping - can be overwritten in loop for consecutive
@inbounds for (index, τ) in enumerate(m)
if !overlapping #overwrite stride for consecutive operation
mStride = τ
end
#mtie: https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication1065.pdf
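#sliding-window extrema: track the minimum and maximum of the phase over every window
#of τ + 1 samples and report the largest observed range (max - min)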
terms = n - τ
if terms < 2
break
end
submin = data[1]
submax = data[1]
for j = 1:(1 + τ)
if data[j] < submin
submin = data[j]
elseif data[j] > submax
submax = data[j]
end
end
delta = submax - submin
maximumv = delta
for i = (1 + mStride):(mStride):(n - τ)
#max pipe
if data[i - mStride] == submax #the departing sample was the window maximum: rescan the window
submax = data[i]
for j = i:(i + τ)
if data[j] > submax
submax = data[j]
end
end
delta = submax - submin
elseif data[i + τ] > submax #if new element is bigger than the old one
submax = data[i + τ]
delta = submax - submin
end
#min pipe
if data[i - mStride] == submin #the departing sample was the window minimum: rescan the window
submin = data[i]
for j = i:(i + τ)
if data[j] < submin
submin = data[j]
end
end
delta = submax - submin
elseif data[i + τ] < submin #if new element is smaller than the old one
submin = data[i + τ]
delta = submax - submin
end
#comparer
if delta > maximumv
maximumv = delta
end
end
dev[index] = maximumv
deverr[index] = dev[index] / sqrt(terms)
devcount[index] = terms
end
selector = devcount .> 1 #select only entries, where 2 or more terms contributed to the deviation
(tau = m[selector] ./ rate, deviation = dev[selector], error = deverr[selector], count = devcount[selector])
end
| AllanDeviations | https://github.com/JulienKluge/AllanDeviations.jl.git |
|
[
"MIT"
] | 0.3.0 | 05e3b02540f98940ffa775d46860a48259372b6e | code | 1648 | """
timedev(data, rate; [frequency=false], [overlapping=true], [taus=192])
Calculates the time deviation
#parameters:
* `<data>`: The data array to calculate the deviation from, either as phases or frequencies.
* `<rate>`: The rate of the data given.
* `[frequency]`: True if `data` contains frequency data, otherwise (default) phase data is assumed.
* `[overlapping]`: True (default) to calculate the overlapping deviation, false otherwise.
* `[taus]`: Taus to calculate the deviation at. This can either be an `AllanTauDescriptor` type (`AllTaus`, `QuarterOctave`, `HalfOctave`, `Octave`, `HalfDecade`, `Decade`), an array of taus to calculate at, a float number to build a custom log-scale on, or an integer to build a specific number of log-spaced points (default: 192).
#returns: named tuple (tau, deviation, error, count)
* `tau`: Taus which were used.
* `deviation`: Deviations calculated.
* `error`: Respective errors.
* `count`: Number of contributing terms for each deviation.
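#examples:
A minimal sketch; the result equals the modified Allan deviation scaled by τ/√3 (names are illustrative):
```julia
phase = randn(10_000)
result = timedev(phase, 1.0)
```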
"""
function timedev(
data::AbstractArray{T, 1},
rate::AbstractFloat;
frequency::Bool = false,
overlapping::Bool = true,
taus::Union{Type{U}, Integer, AbstractFloat, Array{Float64}} = 192) where {T, U <: AllanTauDescriptor}
n = length(data)
if n < 4
error("Length for `data` in timedev must be at least 4 or greater")
#we check this here, so that we can output the right function name in case of the error
end
(mdtaus, mddeviation, mderror, mdcount) = mallandev(data, rate, frequency = frequency, overlapping = overlapping, taus = taus)
mdm = mdtaus ./ sqrt(3)
(tau = mdtaus, deviation = mdm .* mddeviation, error = mdm .* mderror, count = mdcount)
end | AllanDeviations | https://github.com/JulienKluge/AllanDeviations.jl.git |
|
[
"MIT"
] | 0.3.0 | 05e3b02540f98940ffa775d46860a48259372b6e | code | 3185 | """
totaldev(data, rate; [frequency=false], [overlapping=true], [taus=192])
Calculates the total deviation
#parameters:
* `<data>`: The data array to calculate the deviation from, either as phases or frequencies.
* `<rate>`: The rate of the data given.
* `[frequency]`: True if `data` contains frequency data, otherwise (default) phase data is assumed.
* `[overlapping]`: True (default) to calculate the overlapping deviation, false otherwise.
* `[taus]`: Taus to calculate the deviation at. This can either be an `AllanTauDescriptor` type (`AllTaus`, `QuarterOctave`, `HalfOctave`, `Octave`, `HalfDecade`, `Decade`), an array of taus to calculate at, a float number to build a custom log-scale on, or an integer to build a specific number of log-spaced points (default: 192).
#returns: named tuple (tau, deviation, error, count)
* `tau`: Taus which were used.
* `deviation`: Deviations calculated.
* `error`: Respective errors.
* `count`: Number of contributing terms for each deviation.
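#examples:
A minimal sketch; the internal reflection extension keeps long-τ estimates usable (names are illustrative):
```julia
phase = randn(10_000)
result = totaldev(phase, 1.0)
```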
"""
function totaldev(
data::AbstractArray{T, 1},
rate::AbstractFloat;
frequency::Bool = false,
overlapping::Bool = true,
taus::Union{Type{U}, Integer, AbstractFloat, Array{Float64}} = 192) where {T, U <: AllanTauDescriptor}
#frequency to phase calculation
if frequency
data = frequencytophase(data, rate)
end
n = length(data)
if n < 3
error("Length for `data` in totaldev must be at least 3 or greater")
end
if !overlapping #warn for consecutive execution
@warn "It is highly unusual to use the total deviation in the non overlapping form. Do not use this for definite interpretation or publication."
end
#tau calculations
m = taudescription_to_m(taus, rate, n)
#array reflection
dataPrime = zeros(Float64, 3 * n - 4)
datStart = 2 * data[1]
datEnd = 2 * data[n]
nm2 = n - 2
@inbounds for i = 1:nm2
dataPrime[i ] = datStart - data[n - i] #left reflection
dataPrime[i + nm2 ] = data[i] #original data from 1 to (n - 2)
dataPrime[i + nm2 + n] = datEnd - data[n - i] #right reflection
end
dataPrime[2 * nm2 + 1] = data[n - 1] #original data (n - 1)
dataPrime[2 * nm2 + 2] = data[n] #original data (n)
dev = zeros(T, length(m)) #totaldev
deverr = zeros(T, length(m)) #totaldev error
devcount = zeros(Int, length(m)) #sum term count
mStride = 1 #overlapping - can be overwritten in loop for consecutive
@inbounds for (index, τ) in enumerate(m)
if n - τ < 1
break
end
if !overlapping #overwrite stride for consecutive operation
mStride = τ
end
#total deviation (cf. http://www.leapsecond.com/tools/adev_lib.c)
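#estimator on the reflected series x*: σ_total²(τ) = 1 / (2 ⋅ terms ⋅ τ²) ⋅ Σᵢ (x*[i - m] - 2x*[i] + x*[i + m])²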
sum = zero(T)
i = n
terms = 0
while (i <= nm2 + n - 1)
v = dataPrime[i - τ] - 2 * dataPrime[i] + dataPrime[i + τ]
sum += v * v
i += mStride
terms += 1
end
if terms <= 1 #break the tau loop if no contribution with term-count > 1 is done
break
end
dev[index] = sqrt(sum / (2 * terms)) / τ * rate
deverr[index] = dev[index] / sqrt(terms)
devcount[index] = terms
end
selector = devcount .> 1 #select only entries, where 2 or more terms contributed to the deviation
(tau = m[selector] ./ rate, deviation = dev[selector], error = deverr[selector], count = devcount[selector])
end
| AllanDeviations | https://github.com/JulienKluge/AllanDeviations.jl.git |
|
[
"MIT"
] | 0.3.0 | 05e3b02540f98940ffa775d46860a48259372b6e | code | 96 | using Test
using AllanDeviations
@testset "General Tests" begin include("test_general.jl") end
| AllanDeviations | https://github.com/JulienKluge/AllanDeviations.jl.git |
|
[
"MIT"
] | 0.3.0 | 05e3b02540f98940ffa775d46860a48259372b6e | code | 9189 | using Test
using Random
#include("../src/AllanDeviations.jl")
#using .AllanDeviations
using AllanDeviations
arrInt = zeros(Int, 5)
arr32 = zeros(Float32, 5)
arr64 = zeros(Float64, 5)
resInt = allandev(arrInt, 1.0, taus = AllTaus)
res32 = allandev(arr32, 1.0, taus = AllTaus)
res64 = allandev(arr64, 1.0, taus = AllTaus)
#Basic result type test
@test isa(resInt.deviation, Array{Int})
@test isa(res32.deviation, Array{Float32})
@test isa(res64.deviation, Array{Float64})
@test isa(res32.tau, Array{Float64})
@test isa(res64.tau, Array{Float64})
@test isa(res32.error, Array{Float32})
@test isa(res64.error, Array{Float64})
@test isa(res32.count, Array{Int})
@test isa(res64.count, Array{Int})
#Allandev from zero arrays is zero
@test sum(res32.deviation) == 0
@test sum(res64.deviation) == 0
#which also holds then for their errors
@test sum(res32.error) == 0
@test sum(res64.error) == 0
#and the result should be the same lenth for both types
@test length(res32.deviation) == length(res64.deviation)
@test length(res32.error) == length(res64.error)
@test length(res32.tau) == length(res64.tau)
@test length(res32.count) == length(res64.count)
#rate tests
arr64 = [1.0, 2.0, 1.0, 2.0, 1.5, 1.5, 2.0, 1.75]
res64 = allandev(arr64, 1.0)
res64r = allandev(arr64, 0.5)
@test sum(abs.(res64.deviation .- (2.0 .* res64r.deviation))) <= 2e-16 #half the rate means half the allan deviation
@test sum(abs.(res64.tau .- (0.5 .* res64r.tau))) <= 2e-16 #and double the tau
@test sum(abs.(res64.count .- res64r.count)) == 0 #but the count stays
#too few data points
@test_throws ErrorException allandev([1.0], 1.0)
@test_throws ErrorException allandev(zeros(Float64, 0), 1.0)
@test_throws ErrorException mallandev([1.0], 1.0)
@test_throws ErrorException hadamarddev([1.0], 1.0)
@test_throws ErrorException timedev([1.0], 1.0)
@test_throws ErrorException totaldev([1.0], 1.0)
@test_throws ErrorException mtie([1.0], 1.0)
#tau errors
@test_throws ErrorException allandev(arr64, 1.0, taus = 1.0)
@test_throws ErrorException allandev(arr64, 1.0, taus = 0.5)
@test_throws ErrorException allandev(arr64, 1.0, taus = -2.0)
#result and comparison tests
resallan = allandev(arr64, 1.0, taus = AllTaus)
resmallan = mallandev(arr64, 1.0, taus = AllTaus)
reshadamard = hadamarddev(arr64, 1.0, taus = AllTaus)
restime = timedev(arr64, 1.0, taus = AllTaus)
restotal = totaldev(arr64, 1.0, taus = AllTaus)
resmtie1 = mtie(arr64, 1.0, taus = AllTaus)
resmtie2 = mtie([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], 1.0, taus = AllTaus)
resmtie3 = mtie([8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0], 1.0, taus = AllTaus)
@test abs(resallan.deviation[1] - 0.97093168314425360) < 2e-16
@test abs(resallan.deviation[2] - 0.18221724671391565) < 2e-16
@test abs(resallan.deviation[3] - 0.20833333333333334) < 2e-16
@test abs(resmallan.deviation[1] - 0.9709316831442536) < 2e-16
@test abs(resmallan.deviation[2] - 0.0919975090242484) < 2e-16
@test abs(reshadamard.deviation[1] - 1.0616418102794056) < 2e-16
@test abs(reshadamard.deviation[2] - 0.1943203969393503) < 2e-16
@test abs(restime.deviation[1] - 0.56056766862807130) < 2e-16
@test abs(restime.deviation[2] - 0.10622957319984969) < 2e-16
@test abs(restotal.deviation[1] - 0.97093168314425360) < 2e-16
@test abs(restotal.deviation[2] - 0.35539004394233290) < 2e-16
@test abs(restotal.deviation[3] - 0.19982631347136330) < 2e-16
@test abs(restotal.deviation[4] - 0.27540216805004764) < 2e-16
@test abs(restotal.deviation[5] - 0.13919410907075053) < 2e-16
@test abs(restotal.deviation[6] - 0.14877975892797604) < 2e-16
@test length(resmtie1.deviation) == length(arr64) - 2
for i = 1:length(resmtie1.deviation)
@test abs(resmtie1.deviation[i] - 1.0) < 2e-16
@test abs(resmtie2.deviation[i] - i) < 2e-16
@test abs(resmtie3.deviation[i] - i) < 2e-16
end
@test resallan.deviation[1] == resmallan.deviation[1] #first element of allan deviation and modified allan deviation is the same
@test abs(resallan.deviation[1] / sqrt(3) - restime.deviation[1]) < 2e-16 #the first time deviation element equals the first allan deviation element scaled by 1/sqrt(3) (τ = 1 here)
@test reshadamard.count[1] < resallan.count[1] #the hadamarddeviation iterates over four terms
@test reshadamard.count[1] < resmallan.count[1] #the hadamarddeviation iterates over four terms
@test abs(resmallan.deviation[2] / sqrt(3) * 2 - restime.deviation[2]) < 2e-16 #test time deviation calculation
#overlapping tests
Random.seed!(0xA11E4DE71A7104_00)
arr64 = rand(512)
resallan = allandev(arr64, 1.0, taus = AllTaus).count
resallan_o = allandev(arr64, 1.0, overlapping = true, taus = AllTaus).count
resallan_c = allandev(arr64, 1.0, overlapping = false, taus = AllTaus).count
resmallan_o = mallandev(arr64, 1.0, overlapping = true, taus = AllTaus).count
resmallan_c = mallandev(arr64, 1.0, overlapping = false, taus = AllTaus).count
reshadamard_o = hadamarddev(arr64, 1.0, overlapping = true, taus = AllTaus).count
reshadamard_c = hadamarddev(arr64, 1.0, overlapping = false, taus = AllTaus).count
restotal_o = totaldev(arr64, 1.0, overlapping = true, taus = AllTaus).count
resmtie_o = mtie(arr64, 1.0, overlapping = true, taus = AllTaus).count
@test length(resallan) == length(resallan_o) #overlapping is standard
@test length(resallan_o) == 255
@test length(resallan_c) == 170
@test sum(resallan_o) == 65280
@test sum(resallan_c) == 2674
@test length(resmallan_o) == 170
@test length(resmallan_c) == 170
@test sum(resmallan_o) == 43605
@test sum(resmallan_c) == 2674
@test length(reshadamard_o) == 170
@test length(reshadamard_c) == 127
@test sum(reshadamard_o) == 43435
@test sum(reshadamard_c) == 2461
@test length(restotal_o) == 510
@test sum(restotal_o) == 260100
@test length(resmtie_o) == 510
@test sum(resmtie_o) == 130815
#frequency conversion tests
resallan = allandev(arr64, 1.0, taus = AllTaus)
resallan_p = allandev(arr64, 1.0, frequency = false, taus = AllTaus)
resallan_f = allandev(arr64, 1.0, frequency = true, taus = AllTaus)
resallan_f_r = allandev(arr64, 0.5, frequency = true, taus = AllTaus)
arr64_f = zeros(Float64, length(arr64) + 1)
arr64_f[2:length(arr64_f)] = cumsum(arr64)
arr64_f_r = zeros(Float64, length(arr64) + 1)
arr64_f_r[2:length(arr64_f)] = cumsum(arr64 * 2.0)
resallan_f_a = allandev(arr64_f, 1.0, frequency = false, taus = AllTaus)
resallan_f_a_r = allandev(arr64_f_r, 1.0, frequency = false, taus = AllTaus)
resmallan = mallandev(arr64, 1.0, frequency = true, taus = AllTaus)
reshadamard = hadamarddev(arr64, 1.0, frequency = true, taus = AllTaus)
restime = timedev(arr64, 1.0, frequency = true, taus = AllTaus)
@test sum(abs.(resallan.deviation .- resallan_p.deviation)) < 2e-16 #phase is standard
@test sum(abs.(resallan_p.deviation .- resallan_f.deviation)) > 2e-16 #frequency is not the same
@test sum(abs.(resallan_f.deviation .- resallan_f_a.deviation)) < 2e-13 #frequency conversion
@test sum(abs.(resallan_f_r.deviation .- (resallan_f_a_r.deviation .* 0.5))) < 2e-13 #frequency conversion with different rate
@test sum(abs(resallan_f.deviation[1] - resmallan.deviation[1])) < 2e-15 #also test for mallandev
@test abs(reshadamard.deviation[1] - 0.29893879132526296) < 2e-16 #hadamarddev
@test abs(restime.deviation[1] - 0.17291213240777910) < 2e-16 #timedev
#taus tests
resallan_a = allandev(arr64, 1.0, taus = AllTaus).count
resallan_qo = allandev(arr64, 1.0, taus = QuarterOctave).count
resallan_ho = allandev(arr64, 1.0, taus = HalfOctave).count
resallan_o = allandev(arr64, 1.0, taus = Octave).count
resallan_hd = allandev(arr64, 1.0, taus = HalfDecade).count
resallan_d = allandev(arr64, 1.0, taus = Decade).count
#all tau arguments
taus_o_a = 2.0 .^(0:floor(log2(length(arr64))))
resallan_o_c = allandev(arr64, 1.0, taus = 2.0).count
resallan_o_a = allandev(arr64, 1.0, taus = taus_o_a).count
#all tau arguments mallandev
resmallan = mallandev(arr64, 1.0, taus = Octave).count
resmallan_o_c = mallandev(arr64, 1.0, taus = 2.0).count
resmallan_o_a = mallandev(arr64, 1.0, taus = taus_o_a).count
#all tau arguments hadamarddev
reshadamard = hadamarddev(arr64, 1.0, taus = Octave).count
reshadamard_o_c = hadamarddev(arr64, 1.0, taus = 2.0).count
reshadamard_o_a = hadamarddev(arr64, 1.0, taus = taus_o_a).count
@test length(resallan_a) == 255
@test sum(resallan_a) == 65280
@test length(resallan_qo) == 21
@test sum(resallan_qo) == 8680
@test length(resallan_ho) == 13
@test sum(resallan_ho) == 5506
@test length(resallan_o) == 8
@test sum(resallan_o) == 3586
@test length(resallan_o_c) == 8 #float tau argument
@test sum(resallan_o_c) == 3586 #float tau argument
@test length(resallan_o_a) == 8 #array tau argument
@test sum(resallan_o_a) == 3586 #array tau argument
@test length(resallan_hd) == 4
@test sum(resallan_hd) == 1736
@test length(resallan_d) == 3
@test sum(resallan_d) == 1314
#mallandev tau tests
@test length(resmallan) == length(resmallan_o_c)
@test length(resmallan_o_a) == length(resmallan_o_c)
@test sum(resmallan) == sum(resmallan_o_c)
@test sum(resmallan_o_a) == sum(resmallan_o_c)
#hadamarddev tau tests
@test length(reshadamard) == length(reshadamard_o_c)
@test length(reshadamard_o_a) == length(reshadamard_o_c)
@test sum(reshadamard) == sum(reshadamard_o_c)
@test sum(reshadamard_o_a) == sum(reshadamard_o_c)
| AllanDeviations | https://github.com/JulienKluge/AllanDeviations.jl.git |
|
[
"MIT"
] | 0.3.0 | 05e3b02540f98940ffa775d46860a48259372b6e | docs | 4724 | 
AllanDeviations.jl is a package for the Julia programming language
and provides various frequency- and phase-stability deviations (or variances) in overlapping
and consecutive forms.
Currently implemented are:
|Deviation|Function name|Phase Data|Frequency Data|Consecutive|Overlapping|
|---------|:------------|:--------:|:------------:|:---------:|:---------:|
|Allan deviation|`allandev`|✓|✓|✓|✓|
|Modified Allan deviation|`mallandev`|✓|✓|✓|✓|
|Hadamard deviation|`hadamarddev`|✓|✓|✓|✓|
|Time deviation|`timedev`|✓|✓|✓|✓|
|Total deviation|`totaldev`|✓|✓|✓|✓|
|Maximal time interval error|`mtie`|✓|✓|✓|✓|

---
|Build & Tests<br>(Julia v1.0.0 & upwards)| |Code test coverage| | Documentation |
|-------------|-|------------------|-|------------------|
|[](https://ci.appveyor.com/project/JulienKluge/allandeviations-jl/branch/master)| |[](https://coveralls.io/github/JulienKluge/AllanDeviations.jl?branch=master)| | [](https://JulienKluge.github.io/AllanDeviations.jl/stable/) |
[](https://travis-ci.org/JulienKluge/AllanDeviations.jl)| |[](https://codecov.io/gh/JulienKluge/AllanDeviations.jl)| | [](https://JulienKluge.github.io/AllanDeviations.jl/latest/) |
---
## Usage
### Installing (v1.0.0 and higher)
To install AllanDeviations.jl open up a Julia REPL/Console and type:
```Julia
using Pkg;
Pkg.add("AllanDeviations")
```
This installs the package along with any dependencies needed.
### Example code
The following code calculates the overlapping Allan deviation of a given data array `arr` with rate `r` at log-spaced averaging times τ.
```Julia
using AllanDeviations
(tau, deviation, error, count) = allandev(arr, r) #assuming phase data
(tau, deviation, error, count) = allandev(arr, r, frequency = true) #assuming frequency data
(tau, deviation, error, count) = allandev(arr, r, overlapping = false) #non-overlapping/consecutive
```
Every function returns a simple named tuple `(tau, deviation, error, count)`. `tau` is an array of the averaging times used. `deviation` are the respective calculated deviations, `error` the respective errors of the deviations and `count` is the number of terms contributing to every deviation.<br>The errors are calculated by `error .= deviation ./ sqrt.(count)`.
The averaging times τ can also be finely tuned:
```Julia
using AllanDeviations
allandev(arr, r) #default: up to 192 log-spaced taus
allandev(arr, r, taus = AllTaus) #uses every possible tau value
allandev(arr, r, taus = Octave) #log_2 spaced: octave
allandev(arr, r, taus = HalfOctave) #log_1.5 spaced: half octave
allandev(arr, r, taus = QuarterOctave) #log_1.25 spaced: quarter octave
allandev(arr, r, taus = Decade) #log_10 spaced: decade
allandev(arr, r, taus = HalfDecade) #log_5 spaced: half decade
allandev(arr, r, taus = 1.1) #log_1.1 spaced
allandev(arr, r, taus = [1.0, 2.0, 3.0]) #calculates at τ = 1.0, τ = 2.0 and τ = 3.0 if possible
allandev(arr, r, taus = [0.4]) #calculates at τ = 0.4 if possible
```
#### CSV example
This example shows how to load data from a csv file (via the CSV package), calculate the overlapping allan deviation with it and to output a plot (via the Plots package) in the same directory:
```Julia
using AllanDeviations, CSV, Plots
data = CSV.read("fractionalFrequencyData.csv") #load data
freq = data[:, 2] #extract fractional frequency
rate = length(data[:, 1]) / data[end, 1] #calculate rate
result = allandev(freq, rate, frequency = true) #calculate overlapping allan deviation
plot(result.tau, result.deviation, xscale = :log10, yscale = :log10) #log-log plot everything
savefig("allanDevPlot.png") #save the plot
```
> 
---
## References
+ [NIST - Riley, William J. "Handbook of frequency stability analysis." (2008): 81.](https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication1065.pdf)
+ [C Reference implementations: leapsecond.com](http://www.leapsecond.com/tools/adev_lib.c)
+ [Python Package: AllanTools](https://pypi.org/project/AllanTools/) | AllanDeviations | https://github.com/JulienKluge/AllanDeviations.jl.git |
|
[
"MIT"
] | 0.3.0 | 05e3b02540f98940ffa775d46860a48259372b6e | docs | 1964 | # AllanDeviations.jl
AllanDeviations.jl is a package for the [Julia programming language](https://www.julialang.org).
It provides fast frequency and phase stability deviations/variances for different purposes and timescales
in a unified API ([API guide](@ref)) and plain Julia without any dependencies.
It was build and tested against Julia v1 and should be therefore upwards compatible for a long time.
## Implemented Deviations/Functions
#### [Allan deviation](@ref)
* Overlapping & non-overlapping
* Frequency- & phase data
* General purpose choice
#### [Modified Allan deviation](@ref)
* Overlapping & non-overlapping
* Frequency- & phase data
* Used to distinguish white and flicker phase modulation (W PM and F PM)
#### [Hadamard deviation](@ref)
* Overlapping & non-overlapping
* Frequency- & phase data
* Rejects frequency drift and handles divergent noise
#### [Time deviation](@ref)
* Overlapping & non-overlapping
* Frequency- & phase data
* General time error of time source
#### [Total deviation](@ref)
* Overlapping & non-overlapping
* Frequency- & phase data
* Better confidence than the Allan deviation at long averaging times
#### [Maximum time interval error](@ref)
* Overlapping & non-overlapping
* Frequency- & phase data
* Measure of clock error commonly used in the telecommunications industry
## Example Calculation
This is an example plot of some AllanDeviations.jl calculations on a potassium D2 frequency beat note from two reference lasers.

## References
The main algorithms were implemented with the help of the C reference implementation from [leapsecond.com](http://www.leapsecond.com/tools/adev_lib.c) and the main literature, [NIST - Riley, William J. "Handbook of frequency stability analysis." (2008): 81.](https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication1065.pdf).
The Python package [allantools](https://pypi.org/project/AllanTools/) was used as a reference implementation to verify the results against.
| AllanDeviations | https://github.com/JulienKluge/AllanDeviations.jl.git |
|
[
"MIT"
] | 0.3.0 | 05e3b02540f98940ffa775d46860a48259372b6e | docs | 756 | # Installation guide
AllanDeviations.jl is registered in [Metadata.jl](https://github.com/JuliaLang/METADATA.jl/tree/metadata-v2/AllanDeviations).
Therefore it is part of the official, public package system.
You can download and install it in your global Julia installation (or a local project)
by opening a Julia console/REPL and typing:
```Julia
using Pkg
Pkg.add("AllanDeviations")
```
This installs all necessary files. **This only needs to be done once!**
Afterwards, the package can be loaded by
```Julia
using AllanDeviations
```
## Updating
The package installation can be updated to the newest version with:
```Julia
using Pkg
Pkg.update("AllanDeviations")
```
Or by updating all installed packages:
```Julia
using Pkg
Pkg.update()
```
| AllanDeviations | https://github.com/JulienKluge/AllanDeviations.jl.git |
|
[
"MIT"
] | 0.3.0 | 05e3b02540f98940ffa775d46860a48259372b6e | docs | 5216 | # Quickstart
## Installation
Install the package once in a Julia Console/REPL with:
```Julia
using Pkg
Pkg.add("AllanDeviations")
```
## Loading
The package can be loaded in every Julia program with a simple using directive
```Julia
using AllanDeviations
```
## Allan Deviation of random data
Print the overlapping Allan Deviation of one million random points with rate 1.0 at octave log-spaced taus:
```Julia
using AllanDeviations
arr = rand(Float64, 1_000_000)
result = allandev(arr, 1.0)
println(result.deviation)
```
## Other deviations
```Julia
result = allandev(arr, 1.0) #Allan deviation
result = mallandev(arr, 1.0) #Modified Allan deviation
result = hadamarddev(arr, 1.0) #Hadamard deviation
result = timedev(arr, 1.0) #Time deviation
result = totaldev(arr, 1.0) #Total deviation
result = mtie(arr, 1.0) #Maximum time interval error
```
## Full Data result
Every deviation method returns a named tuple in the form `(tau, deviation, error, count)`
```Julia
println("Calculated taus:")
println(result.tau)
println("Calculated Deviations:")
println(result.deviation)
println("Calculated errors:")
println(result.error)
println("Calculated Term Number:")
println(result.count)
```
### Same Result via tuple deconstruction
The returned tuple can already be deconstructed into variables on return.
```Julia
(myTaus, myDeviation, myError, myCount) = allandev(arr, 1.0)
```
## Calculating on frequency data
AllanDeviations.jl assumes by default that the `data` argument contains phase data. This can be changed by setting the optional named argument `frequency` to `true` for frequency data.
```Julia
result = allandev(arr, 1.0, frequency = true)
```
## Non-Overlapping
AllanDeviations.jl will by default calculate the overlapping deviations.
This can be changed by setting the optional named argument `overlapping` to `false`.
```Julia
result = allandev(arr, 1.0, overlapping = false)
```
## Addressing different taus
```Julia
#Key-Types
allandev(arr, 1.0, taus = AllTaus) #all possible taus
allandev(arr, 1.0, taus = QuarterOctave) #quarter octave log-spaced
allandev(arr, 1.0, taus = HalfOctave) #half octave log-spaced
allandev(arr, 1.0, taus = Octave) #octave log-spaced
allandev(arr, 1.0, taus = HalfDecade)    #half decade log-spaced
allandev(arr, 1.0, taus = Decade) #decade log-spaced
#Explicit taus
allandev(arr, 1.0, taus = [2.0]) #calculate deviation at tau=2.0 if possible
allandev(arr, 1.0, taus = [2.0, 3.0, 4.0]) #calculate deviation at tau=2.0, tau=3.0 & tau=4.0 if possible
#Custom log scale
allandev(arr, 1.0, taus = 1.2)           #log_1.2 spaced taus
#Custom log count
allandev(arr, 1.0, taus = 100)           #calculate 100 log-spaced tau values between the minimal and maximal possible tau
#This does not guarantee that exactly 100 deviations will be calculated, since values are discarded
#when fewer than two terms contribute to them
```
## Benchmark Deviations
#### Benchmark different overlapping deviations for one million data points and 200 taus
```Julia
using BenchmarkTools
arr = rand(Float64, 1_000_000);
@btime allandev(arr, 1.0, taus = 200); #Allan Deviation
@btime mallandev(arr, 1.0, taus = 200); #Modified Allan Deviation
@btime hadamarddev(arr, 1.0, taus = 200); #Hadamard Deviation
@btime timedev(arr, 1.0, taus = 200); #Time Deviation
@btime totaldev(arr, 1.0, taus = 200); #Total Deviation
@btime mtie(arr, 1.0, taus = 200); #Maximum time interval error
println("Done")
```
Results
```
315.247 ms (52 allocations: 35.91 KiB) #Allan Deviation
309.990 ms (52 allocations: 35.28 KiB) #Modified Allan Deviation
278.230 ms (52 allocations: 35.28 KiB) #Hadamard Deviation
309.647 ms (57 allocations: 39.33 KiB) #Time Deviation
331.483 ms (54 allocations: 22.92 MiB) #Total Deviation
901.942 ms (52 allocations: 35.91 KiB) #Maximum time interval error
Done
```
For comparison, Python's AllanTools needs approximately
3.5 seconds for the Allan deviation, 6.5 seconds for the total deviation and
an indeterminate amount of time for mtie (to be fair, AllanTools also provides a `fastmtie`, which seems to be unfinished at the time of writing).
#### Benchmark different overlapping deviations for 10,000 data points and all possible taus:
```Julia
using BenchmarkTools
arr = rand(Float64, 10_000)
@btime allandev(arr, 1.0, taus = AllTaus); #Allan Deviation
@btime mallandev(arr, 1.0, taus = AllTaus); #Modified Allan Deviation
@btime hadamarddev(arr, 1.0, taus = AllTaus); #Hadamard Deviation
@btime timedev(arr, 1.0, taus = AllTaus); #Time Deviation
@btime totaldev(arr, 1.0, taus = AllTaus); #Total Deviation
@btime mtie(arr, 1.0, taus = AllTaus); #Maximum time interval error
println("Done")
```
Results:
```
37.702 ms (30 allocations: 436.13 KiB) #Allan Deviation
39.805 ms (30 allocations: 371.13 KiB) #Modified Allan Deviation
28.266 ms (30 allocations: 371.13 KiB) #Hadamard Deviation
39.969 ms (51 allocations: 449.97 KiB) #Time Deviation
150.508 ms (32 allocations: 865.89 KiB) #Total Deviation
240.852 ms (30 allocations: 631.44 KiB) #Maximum time interval error
Done
```
However, these timings need to be taken with a grain of salt, since they were measured on random data rather than real-world data.
| AllanDeviations | https://github.com/JulienKluge/AllanDeviations.jl.git |
|
[
"MIT"
] | 0.3.0 | 05e3b02540f98940ffa775d46860a48259372b6e | docs | 176 | # Allan deviation
## Formula
Allan variance
$$\sigma_y^2(\tau)=\frac{1}{2(N-2m)\tau^2}\sum_{j=1}^{N-2m}(x_{j+2m}-2x_{j+m}+x_{j})^2$$
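Here `N` is the number of phase samples, `m` the averaging factor and τ the corresponding averaging time. As an illustration only (the package's `allandev` is the actual API; `naive_allandev` below is a made-up name), a direct translation of this formula for phase data could look like:
```Julia
function naive_allandev(x::AbstractVector, rate::Real, m::Integer)
    N = length(x)
    τ = m / rate                    #averaging time for factor m
    s = 0.0
    for j in 1:(N - 2m)             #sum of squared second differences
        s += (x[j + 2m] - 2x[j + m] + x[j])^2
    end
    return sqrt(s / (2 * (N - 2m) * τ^2))
end
```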
## Doc String
```@docs
allandev(x)
```
| AllanDeviations | https://github.com/JulienKluge/AllanDeviations.jl.git |
|
[
"MIT"
] | 0.3.0 | 05e3b02540f98940ffa775d46860a48259372b6e | docs | 1977 | # API guide
Every deviation function uses the same input and output structure.
```Julia
(tau, deviation, error, count) = XXXdev(data, rate; frequency = false, overlapping = true, taus = 192)
```
## Input Parameter
```Julia
data, rate; frequency = false, overlapping = true, taus = 192
```
* `data` - is the data to calculate the deviation from. It must be either phase data (default) or frequency data according to the `frequency` argument. The element type of the array can be any numeric type and the deviation functions are type stable.
* `rate` - is the rate as a Float, which describes the data capturing rate of your dataset.
* `[frequency]` *optional*, *named* - can be set to `false` (default) if the `data` argument contains phase data or `true` if the `data` argument contains frequency data.
* `[overlapping]` *optional*, *named* - can be set to `true` (default) for the overlapping deviation or `false` for the consecutive one.
* `[taus]` *optional*, *named* - describes at which averaging times the deviation should be calculated (see the summary sketch after this list). This can be either:
  - An `AllanTauDescriptor` type where there is: `AllTaus`, `QuarterOctave`, `HalfOctave`, `Octave`, `HalfDecade`, `Decade`, which produce correspondingly log-spaced points
- A Float Array which describes at which taus the deviation should be evaluated
- A Float which produces an according base-log-spaced array of taus
  - An Integer (default, 192) which produces an array of equally many log-spaced taus. (Note: this does not mean that exactly this number of deviations will be returned, because some can be discarded due to too few contributing terms)
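As a compact summary (assuming `data` and `rate` are defined), the four accepted forms look like this; the values are illustrative:
```Julia
allandev(data, rate, taus = Octave)       #an AllanTauDescriptor
allandev(data, rate, taus = [1.0, 10.0])  #explicit tau values
allandev(data, rate, taus = 1.5)          #log-base-1.5 spacing
allandev(data, rate, taus = 100)          #about 100 log-spaced taus
```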
## Output Tuple
Every deviation returns a named output tuple:
```Julia
(tau, deviation, error, count)
```
* `tau` - the taus where the respective deviations got calculated on
* `deviation` - the deviations
* `error` - the respective deviation errors
* `count` - the respective count of contributing terms for each deviation (always 2 <= count < N)
| AllanDeviations | https://github.com/JulienKluge/AllanDeviations.jl.git |
|
[
"MIT"
] | 0.3.0 | 05e3b02540f98940ffa775d46860a48259372b6e | docs | 205 | # Hadamard deviation
## Formula
Hadamard variance
$$H\sigma_y^2(\tau)=\frac{1}{6\tau^2(N-3m)}\sum_{j=1}^{N-3m}\left(x_{j+3m}-3x_{j+2m}+3x_{j+m}-x_{j}\right)^2$$
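Here `N` is the number of phase samples and `m` the averaging factor. A direct, unoptimized translation of the formula (illustrative only; `naive_hadamarddev` is a made-up name, not the package API) could be:
```Julia
function naive_hadamarddev(x::AbstractVector, rate::Real, m::Integer)
    N = length(x)
    τ = m / rate
    s = 0.0
    for j in 1:(N - 3m)             #sum of squared third differences
        s += (x[j + 3m] - 3x[j + 2m] + 3x[j + m] - x[j])^2
    end
    return sqrt(s / (6 * τ^2 * (N - 3m)))
end
```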
## Doc String
```@docs
hadamarddev(x)
```
| AllanDeviations | https://github.com/JulienKluge/AllanDeviations.jl.git |
|
[
"MIT"
] | 0.3.0 | 05e3b02540f98940ffa775d46860a48259372b6e | docs | 238 | # Modified Allan deviation
## Formula
Modified Allan variance
$$Mod\,\sigma_y^2(\tau)=\frac{1}{2m^2\tau^2(N-3m+1)}\sum_{j=1}^{N-3m+1}\left(\sum_{t=j}^{j+m-1}[x_{t+2m}-2x_{t+m}+x_{t}]\right)^2$$
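Here the inner sum averages the second differences over a window of `m` samples. Illustrative only (`naive_mallandev` is a made-up name, not the package API):
```Julia
function naive_mallandev(x::AbstractVector, rate::Real, m::Integer)
    N = length(x)
    τ = m / rate
    s = 0.0
    for j in 1:(N - 3m + 1)
        inner = 0.0
        for t in j:(j + m - 1)      #window-averaged second difference
            inner += x[t + 2m] - 2x[t + m] + x[t]
        end
        s += inner^2
    end
    return sqrt(s / (2 * m^2 * τ^2 * (N - 3m + 1)))
end
```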
## Doc String
```@docs
mallandev(x)
```
| AllanDeviations | https://github.com/JulienKluge/AllanDeviations.jl.git |
|
[
"MIT"
] | 0.3.0 | 05e3b02540f98940ffa775d46860a48259372b6e | docs | 637 | # Maximum time interval error
## Formula
Maximum time interval error
$$Mtie(\tau)=\operatorname{max}_{1\leq k\leq N-n}\left(\operatorname{max}_{k\leq t\leq k+n}(x_t)-\operatorname{min}_{k\leq t\leq k+n}(x_t)\right)$$
## Doc String
```@docs
mtie(x)
```
## Possible issues
* `mtie` is inherently computationally expensive and can be very slow for big taus with many data points. When computations take too much time, consider reducing the number of taus and, especially, using smaller taus. A naive implementation of the defining formula (see the sketch after this list) makes the cost obvious.
* `mtie` can be called with a non-overlapping calculation. This throws a warning because it is unusual to use, but it is nevertheless faster.
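Sketch of such a naive implementation for phase data `x` and a window of `n` samples, which makes the cost per window obvious (illustrative only; `naive_mtie` is a made-up name, not the package API):
```Julia
function naive_mtie(x::AbstractVector, n::Integer)
    N = length(x)
    #for every window position, take the peak-to-peak range; O(N·n)
    return maximum(maximum(view(x, k:(k + n))) - minimum(view(x, k:(k + n)))
                   for k in 1:(N - n))
end
```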
| AllanDeviations | https://github.com/JulienKluge/AllanDeviations.jl.git |
|
[
"MIT"
] | 0.3.0 | 05e3b02540f98940ffa775d46860a48259372b6e | docs | 143 | # Time deviation
## Formula
Time variance
$$\sigma_x^2(\tau)=\frac{\tau^2}{3}Mod\,\sigma_y^2(\tau)$$
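Hence, given a modified Allan deviation value `mdev` at averaging time `τ` (both assumed to be already computed, e.g. with `mallandev`), the time deviation is just (illustrative):
```Julia
tdev = τ * mdev / sqrt(3)
```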
## Doc String
```@docs
timedev(x)
```
| AllanDeviations | https://github.com/JulienKluge/AllanDeviations.jl.git |
|
[
"MIT"
] | 0.3.0 | 05e3b02540f98940ffa775d46860a48259372b6e | docs | 354 | # Total deviation
## Formula
Total variance
$$Tot\,\sigma_y^2(\tau)=\frac{1}{2\tau^2(N-2)}\sum_{j=2}^{N-1}\left(x_{j-m}^*-2x_{j}^*+x_{j+m}^*\right)^2$$
Here $x^*$ denotes the phase sequence extended beyond both ends by reflection, which is what gives the estimator its improved confidence at long averaging times.
## Doc String
```@docs
totaldev(x)
```
## Possible issues
* `totaldev` can be called with a non-overlapping calculation. This throws a warning because it is unusual to use, but it is nevertheless faster.
| AllanDeviations | https://github.com/JulienKluge/AllanDeviations.jl.git |
|
[
"MIT"
] | 1.3.0 | 8a4aef12779f4becb49be401efc6f57777b5bc2c | code | 534 | using Documenter, Zalgo
makedocs(
modules = [Zalgo],
sitename = "Zalgo",
warnonly = true,
format = Documenter.HTML(
prettyurls = get(ENV, "CI", nothing) == "true",
size_threshold=nothing,
collapselevel=1,
assets = ["assets/zalgo-docs.css"]
),
pages = Any[
"Introduction" => "index.md",
"Index" => "functionindex.md"
]
)
deploydocs(
repo = "github.com/cormullion/Zalgo.jl.git",
target = "build",
push_preview=true,
forcepush=true
)
| Zalgo | https://github.com/cormullion/Zalgo.jl.git |
|
[
"MIT"
] | 1.3.0 | 8a4aef12779f4becb49be401efc6f57777b5bc2c | code | 15754 | """
Zalgo.jl does two things.
- It adds pointless diacritics to text: `zalgo("Cthulhu")`
- It converts an input ASCII string to equivalent characters found in the darkest recesses of the Unicode charts:
```
blackboard("Hello World")
boldfraktur("Hello World")
bolditalic("Hello World")
bolditalicsans("Hello World")
boldroman("Hello World")
boldsans("Hello World")
boldscript("Hello World")
boxed("hello world")
circled("HELLO WORLD")
fraktur("Hello World")
italic("Hello World")
italicsans("Hello World")
sans("Hello World")
script("Hello World")
segmented("0123456789")
teletype("Hello World")
upsidedown("Hello World")
```
```large_type("Hello World")``` displays the text using
the Large Type glyphs added to Unicode in version 16.
"""
module Zalgo
export zalgo, boldfraktur, bolditalic, bolditalicsans,
boldroman, boldsans, boldscript, fraktur, italic,
italicsans, sans, script, teletype, upsidedown, blackboard,
boxed, circled, segmented, large_type
include("largetype.jl")
const updc = vcat('\u0300':'\u0315', '\u031A', '\u033D',
'\u034A':'\u034C', '\u0350':'\u0352',
'\u0357':'\u0358', '\u035B', '\u035D', '\u035E',
'\u0360':'\u0361', '\u0363':'\u036F',
'\uFE20':'\uFE22')
const middledc = vcat('\u031B', '\u0334':'\u0338')
const downdc = vcat('\u0316':'\u0319', '\u031C':'\u0333',
'\u0339':'\u033C', '\u0347':'\u0349',
'\u034D':'\u034E', '\u0353':'\u0356', '\u0359',
'\u035A', '\u035C', '\u035F', '\u0362')
const upsidedowndict = Dict{String,String}("a" => "ɐ",
"b" => "q", "c" => "ɔ", "d" => "p", "e" => "ǝ", "f" => "ɟ", "g" => "ƃ",
"h" => "ɥ", "i" => "ı", "j" => "ɾ", "k" => "ʞ", "l" => "ן", "m" => "ɯ",
"n" => "u", "o" => "o", "p" => "d", "q" => "b", "r" => "ɹ", "s" => "s",
"t" => "ʇ", "u" => "n", "v" => "ʌ", "w" => "ʍ", "x" => "x", "y" => "ʎ",
"z" => "z", "A" => "∀", "B" => "𐐒", "C" => "Ɔ", "D" => "◖", "E" => "Ǝ",
"F" => "Ⅎ", "G" => "⅁", "H" => "H", "I" => "I", "J" => "ſ", "K" => "⋊",
"L" => "˥", "M" => "W", "N" => "N", "O" => "O", "P" => "Ԁ", "Q" => "Ό",
"R" => "ᴚ", "S" => "S", "T" => "⊥", "U" => "∩", "V" => "Λ", "W" => "M",
"X" => "X", "Y" => "⅄", "Z" => "Z", "0" => "0", "1" => "Ɩ", "2" => "ᄅ",
"3" => "Ɛ", "4" => "ㄣ", "5" => "ϛ", "6" => "9", "7" => "ㄥ", "8" => "8",
"9" => "6", " " => " ")
# These are a shambles, Unicode Consortium!
# build the fraktur dict
frakturdict = Dict{String,Char}()
[frakturdict[string(Char(i + 64))] = vcat(
'\U1D504', # A
'\U1D505', # B
'\u212d', # C !
'\U1D507', # D
'\U1D508', # E
'\U1D509', # F
'\U1D50a', # G
'\u210c', # H !
'\u2111', # I !
'\U1D50d', # J
'\U1D50e', # K
'\U1D50f', # L
'\U1D510', # M
'\U1D511', # N
'\U1D512', # O
'\U1D513', # P
'\U1D514', # Q
'\u211c', # R !
'\U1D516', # S
'\U1D517', # T
'\U1D518', # U
'\U1D519', # V
'\U1D51a', # W
'\U1D51b', # X
'\U1D51c', # Y
'\u2128' # Z !
)[i] for i = 1:26]
[frakturdict[string(Char(i + 96))] = vcat('\U1D586':'\U1D59F')[i] for i = 1:26]
frakturdict[" "] = ' '
# build the blackboard (double-struck) dict
blackboarddict = Dict{String,Char}()
[blackboarddict[string(Char(i + 64))] = vcat(
'\U1D538', # A
'\U1D539', # B
'\u2102', # C !
'\U1D53B', # D
'\U1D53c', # E
'\U1D53d', # F
'\U1D53e', # G
'\u210D', # H !
'\U1D540', # I !
'\U1D541', # J
'\U1D542', # K
'\U1D543', # L
'\U1D544', # M
'\u2115', # N !
'\U1D546', # O
'\u2119', # P !
'\u211a', # Q !
'\u211d', # R
'\U1D54A', # S
'\U1D54B', # T
'\U1D54C', # U
'\U1D54D', # V
'\U1D54E', # W
'\U1D54f', # X
'\U1D550', # Y
'\u2124' # Z !
)[i] for i = 1:26]
[blackboarddict[string(Char(i + 96))] = vcat('\U1D552':'\U1D56B')[i] for i = 1:26]
# digits
[blackboarddict[string(Char(i + 48))] = vcat('\U1D7D8':'\U1D7E1')[i+1] for i in 0:9]
blackboarddict[" "] = ' '
# build the Script dict
scriptdict = Dict{String,Char}()
[scriptdict[string(Char(i + 64))] = vcat('\U1D49C':'\U1D4B5')[i] for i = 1:26]
[scriptdict[string(Char(i + 96))] = vcat('\U1D4b6':'\U1D4cf')[i] for i = 1:26]
scriptdict["B"] = '\u212c'
scriptdict["E"] = '\u2130'
scriptdict["F"] = '\u2131'
scriptdict["H"] = '\u210b'
scriptdict["I"] = '\u2110'
scriptdict["L"] = '\u2112'
scriptdict["M"] = '\u2133'
scriptdict["R"] = '\u211B'
scriptdict["e"] = '\u212f'
scriptdict["g"] = '\u210a'
scriptdict["o"] = '\u2134'
scriptdict[" "] = ' '
# build the boldroman dict
boldromandict = Dict{String,Char}()
[boldromandict[string(Char(i + 64))] = vcat('\U1D400':'\U1D419')[i] for i = 1:26]
[boldromandict[string(Char(i + 96))] = vcat('\U1D41a':'\U1D433')[i] for i = 1:26]
[boldromandict[string(Char(i + 48))] = vcat('\U1D7CE':'\U1D7D7')[i+1] for i = 0:9]
boldromandict[" "] = ' '
# build the italic dict
italicdict = Dict{String,Char}()
[italicdict[string(Char(i + 64))] = vcat('\U1D434':'\U1D44d')[i] for i = 1:26]
[italicdict[string(Char(i + 96))] = vcat('\U1D44e':'\U1D467')[i] for i = 1:26]
italicdict[" "] = ' '
# build the bolditalic dict
bolditalicdict = Dict{String,Char}()
[bolditalicdict[string(Char(i + 64))] = vcat('\U1D468':'\U1D481')[i] for i = 1:26]
[bolditalicdict[string(Char(i + 96))] = vcat('\U1D482':'\U1D49b')[i] for i = 1:26]
bolditalicdict[" "] = ' '
# build the boldscript dict
boldscriptdict = Dict{String,Char}()
[boldscriptdict[string(Char(i + 64))] = vcat('\U1D4D0':'\U1D4E9')[i] for i = 1:26]
[boldscriptdict[string(Char(i + 96))] = vcat('\U1D4ea':'\U1D503')[i] for i = 1:26]
boldscriptdict[" "] = ' '
# build the boldfraktur dict
boldfrakturdict = Dict{String,Char}()
[boldfrakturdict[string(Char(i + 64))] = vcat('\U1D56c':'\U1D585')[i] for i = 1:26]
[boldfrakturdict[string(Char(i + 96))] = vcat('\U1D586':'\U1D59f')[i] for i = 1:26]
boldfrakturdict[" "] = ' '
# build the sans dict
sansdict = Dict{String,Char}()
[sansdict[string(Char(i + 64))] = vcat('\U1D5A0':'\U1D5B9')[i] for i = 1:26]
[sansdict[string(Char(i + 96))] = vcat('\U1D5ba':'\U1D5d3')[i] for i = 1:26]
[sansdict[string(Char(i + 48))] = vcat('\U1D7E2':'\U1D7EB')[i+1] for i = 0:9]
sansdict[" "] = ' '
# build the boldsans dict
boldsansdict = Dict{String,Char}()
[boldsansdict[string(Char(i + 64))] = vcat('\U1D5d4':'\U1D5ed')[i] for i = 1:26]
[boldsansdict[string(Char(i + 96))] = vcat('\U1D5ee':'\U1D607')[i] for i = 1:26]
[boldsansdict[string(Char(i + 48))] = vcat('\U1D7EC':'\U1D7F5')[i+1] for i = 0:9]
boldsansdict[" "] = ' '
# build the italicsans dict
italicsansdict = Dict{String,Char}()
[italicsansdict[string(Char(i + 64))] = vcat('\U1D608':'\U1D621')[i] for i = 1:26]
[italicsansdict[string(Char(i + 96))] = vcat('\U1D622':'\U1D63b')[i] for i = 1:26]
italicsansdict[" "] = ' '
# build the bolditalicsans dict
bolditalicsansdict = Dict{String,Char}()
[bolditalicsansdict[string(Char(i + 64))] = vcat('\U1D63c':'\U1D655')[i] for i = 1:26]
[bolditalicsansdict[string(Char(i + 96))] = vcat('\U1D656':'\U1D66f')[i] for i = 1:26]
bolditalicsansdict[" "] = ' '
# build the teletype dict
ttdict = Dict{String,Char}()
[ttdict[string(Char(i + 64))] = vcat('\U1D670':'\U1D689')[i] for i = 1:26]
[ttdict[string(Char(i + 96))] = vcat('\U1D68a':'\U1D6a3')[i] for i = 1:26]
[ttdict[string(Char(i + 48))] = vcat('\U1D7F6':'\U1D7FF')[i+1] for i = 0:9]
ttdict[" "] = ' '
# build the boxed dict
boxeddict = Dict{String,Char}()
[boxeddict[string(Char(i + 64))] = vcat('\U1F130':'\U1F14A')[i] for i = 1:26]
[boxeddict[string(Char(i + 96))] = vcat('\U1F170':'\U1F18A')[i] for i = 1:26]
boxeddict[" "] = ' '
# build the circled dict
circleddict = Dict{String,Char}()
[circleddict[string(Char(i + 64))] = vcat('\u24b6':'\u24cf')[i] for i = 1:26]
[circleddict[string(Char(i + 96))] = vcat('\u24d0':'\u24e9')[i] for i = 1:26]
[circleddict[string(Char(i + 0x30))] = vcat('\u2460':'\u2468')[i] for i = 1:9]
# 24EA 0 CIRCLED DIGIT ZERO is separate !!!
circleddict[string(Char(0x30))] = '\u24ea'
circleddict[" "] = ' '
# negativecircled dict
# build the circled dict
negativecircleddict = Dict{String,Char}()
[negativecircleddict[string(Char(i + 64))] = vcat('\U1F150':'\U1F169')[i] for i = 1:26]
[negativecircleddict[string(Char(i + 96))] = vcat('\U1F170':'\U1F189')[i] for i = 1:26]
negativecircleddict[" "] = ' '
# build the segmented dict
segmenteddict = Dict{String,Char}()
[segmenteddict[string(Char(i + 0x2f))] = vcat('\U1FBF0':'\U1FBF9')[i] for i = 1:10]
segmenteddict[" "] = ' '
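# Append one randomly chosen combining character from `dc` to `letter`.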
function adddc(letter, dc)
return string(letter) * string(dc[rand(1:end)])
end
"""
zalgo(str::String, upmarks = 1:4, middlemarks = 1:4,
downmarks = 1:4, maxmarks = 6)
Randomly add up to `maxmarks` diacritic marks to each letter of `str`. The `upmarks`,
`middlemarks`, and `downmarks` ranges determine the minimum and maximum number of
diacritic marks added to the letter at that position.
"""
function zalgo(text::String;
upmarks=1:4,
middlemarks=1:4,
downmarks=1:4,
maxmarks=6)
letters = split(text, "")
zalgostring = String[]
for letter in letters
# can't add a diacritic mark to some letters
if !all(isletter, letter)
push!(zalgostring, letter)
continue
end
upmarks_added = rand(upmarks.start:upmarks.stop)
downmarks_added = rand(downmarks.start:downmarks.stop)
middlemarks_added = rand(middlemarks.start:middlemarks.stop)
newletter = letter
for i in 1:maxmarks
            randint = rand(1:3) # pick among up, down and middle marks
if randint == 1
if upmarks_added > 0
newletter = adddc(newletter, updc)
upmarks_added -= 1
end
elseif randint == 2
if downmarks_added > 0
newletter = adddc(newletter, downdc)
downmarks_added -= 1
end
else
if middlemarks_added > 0
newletter = adddc(newletter, middledc)
middlemarks_added -= 1
end
end
end
push!(zalgostring, newletter)
end
return join(zalgostring)
end
"""
upsidedown(str)
Return a version of string `str` with upside down letters from the Unicode table.
"""
function upsidedown(str)
asciistr = filter!(c -> haskey(upsidedowndict, c), split(str, ""))
return join(map(c -> upsidedowndict[c], asciistr))
end
"""
fraktur(str)
Return a version of string `str` with Fraktur letters from the Unicode table.
"""
function fraktur(str)
asciistr = filter!(c -> haskey(frakturdict, c), split(str, ""))
return join(map(c -> frakturdict[c], asciistr))
end
"""
script(str; roundhand=false)
Return a version of string `str` with mathematical script letters from the Unicode table.
There are two basic styles of mathematical script lettering: the “regular”
calligraphic or Chancery alphabet, and the “fancy script” or round hand alphabet.
By default, the script style will be “script”. If `roundhand` is true, the style will
be “roundhand”.
For more details, see [this Unicode document](https://www.unicode.org/L2/L2020/20275r-math-calligraphic.pdf).
"""
function script(str;
roundhand=false)
asciistr = filter!(c -> haskey(scriptdict, c), split(str, ""))
if roundhand == true
return join(map(c -> string(Zalgo.scriptdict[c], Char(0xFE01)), asciistr))
else
return join(map(c -> Zalgo.scriptdict[c], asciistr))
end
end
"""
boldroman(str)
Return a version of string `str` with boldroman letters from the Unicode table.
"""
function boldroman(str)
asciistr = filter!(c -> haskey(boldromandict, c), split(str, ""))
return join(map(c -> boldromandict[c], asciistr))
end
"""
italic(str)
Return a version of string `str` with italic letters from the Unicode table.
"""
function italic(str)
asciistr = filter!(c -> haskey(italicdict, c), split(str, ""))
return join(map(c -> italicdict[c], asciistr))
end
"""
bolditalic(str)
Return a version of string `str` with bolditalic letters from the Unicode table.
"""
function bolditalic(str)
asciistr = filter!(c -> haskey(bolditalicdict, c), split(str, ""))
return join(map(c -> bolditalicdict[c], asciistr))
end
"""
boldscript(str)
Return a version of string `str` with boldscript letters from the Unicode table.
"""
function boldscript(str)
asciistr = filter!(c -> haskey(boldscriptdict, c), split(str, ""))
return join(map(c -> boldscriptdict[c], asciistr))
end
"""
boldfraktur(str)
Return a version of string `str` with boldfraktur letters from the Unicode table.
"""
function boldfraktur(str)
asciistr = filter!(c -> haskey(boldfrakturdict, c), split(str, ""))
return join(map(c -> boldfrakturdict[c], asciistr))
end
"""
sans(str)
Return a version of string `str` with sans serif letters from the Unicode table.
"""
function sans(str)
asciistr = filter!(c -> haskey(sansdict, c), split(str, ""))
return join(map(c -> sansdict[c], asciistr))
end
"""
boldsans(str)
Return a version of string `str` with bold sans serif letters from the Unicode table.
"""
function boldsans(str)
asciistr = filter!(c -> haskey(boldsansdict, c), split(str, ""))
return join(map(c -> boldsansdict[c], asciistr))
end
"""
italicsans(str)
Return a version of string `str` with italic sans serif letters from the Unicode table.
"""
function italicsans(str)
asciistr = filter!(c -> haskey(italicsansdict, c), split(str, ""))
return join(map(c -> italicsansdict[c], asciistr))
end
"""
bolditalicsans(str)
Return a version of string `str` with bold italic sans serif letters from the Unicode table.
"""
function bolditalicsans(str)
asciistr = filter!(c -> haskey(bolditalicsansdict, c), split(str, ""))
return join(map(c -> bolditalicsansdict[c], asciistr))
end
"""
teletype(str)
Return a version of string `str` with teletype (monospaced) letters from the Unicode table.
"""
function teletype(str)
asciistr = filter!(c -> haskey(ttdict, c), split(str, ""))
return join(map(c -> ttdict[c], asciistr))
end
"""
blackboard(str)
Return a version of string `str` with blackboard (double-struck) letters from the Unicode table.
"""
function blackboard(str)
asciistr = filter!(c -> haskey(blackboarddict, c), split(str, ""))
return join(map(c -> blackboarddict[c], asciistr))
end
"""
boxed(str)
Return a version of string `str` with boxed letters from the Unicode table.
```
boxed("A") -> "🄰"
boxed("a") -> "🅰"
```
"""
function boxed(str)
asciistr = filter!(c -> haskey(boxeddict, c), split(str, ""))
return join(map(c -> boxeddict[c], asciistr))
end
"""
circled(str)
Return a version of string `str` with circled/boxed letters from the Unicode table.
```
A-Z "A" -> "Ⓐ" \u24b6:\u24cf
a-z "a" -> "ⓐ" \u24d0:\u24e9
1-9 "1" -> "①" \u2460:\u2468
0   "0" -> "⓪" \u24ea
A-Z inverse "A" -> "🅐" \U1F150:\U1F169
a-z inverse "a" -> "🅰" \U1F170:\U1F189
```
"""
function circled(str;
negative=false)
if negative
asciistr = filter!(c -> haskey(negativecircleddict, c), split(str, ""))
return join(map(c -> negativecircleddict[c], asciistr))
else
asciistr = filter!(c -> haskey(circleddict, c), split(str, ""))
return join(map(c -> circleddict[c], asciistr))
end
end
"""
segmented(str)
Return a version of string `str` with LED-style digits from the Unicode table.
"""
function segmented(str)
asciistr = filter!(c -> haskey(segmenteddict, c), split(str, ""))
return join(map(c -> segmenteddict[c], asciistr))
end
end # module
| Zalgo | https://github.com/cormullion/Zalgo.jl.git |
|
[
"MIT"
] | 1.3.0 | 8a4aef12779f4becb49be401efc6f57777b5bc2c | code | 6365 | # thanks to Philippe Majerus https://github.com/PhMajerus for documenting these
# in some detail
"""
A single "LargeType" glyph, using the system of pieces defined in Unicode version 16.
Each glyph is made up of nine pieces, each in the range U1CE1A to U1CE50.
"""
struct LargeTypeChar
topleft::Char
topcenter::Char
topright::Char
middleleft::Char
middlecenter::Char
middleright::Char
bottomleft::Char
bottomcenter::Char
bottomright::Char
end
"""
A sequence of LargeType glyphs
"""
struct LargeTypeString
glyphs::Array{LargeTypeChar,1}
end
"""
Construct a LargeType glyph from a string.
"""
function _buildLargeTypeChar(str)
if length(str) != 9
        error("_buildLargeTypeChar: exactly nine characters required - $(str) is $(length(str))")
end
return LargeTypeChar([Char(e) for e in str]...)
end
"""
large_type(str)
Return an object that displays `str` as "Large Type". Each glyph is made
up of 9 small pieces.
Like the other functions, this requires the current font to contain these
characters (in the Unicode range U+1CE1A to U+1CE50), otherwise you
probably won't see anything.
"""
function large_type(str)
res = LargeTypeString([])
for ch in filter(isascii, str)
push!(res.glyphs, LTD[string(ch)])
end
return res
end
function Base.show(io::IO, ltc::LargeTypeChar)
println(io)
print(io, ltc.topleft)
print(io, ltc.topcenter)
print(io, ltc.topright)
println(io)
print(io, ltc.middleleft)
print(io, ltc.middlecenter)
print(io, ltc.middleright)
println(io)
print(io, ltc.bottomleft)
print(io, ltc.bottomcenter)
print(io, ltc.bottomright)
println(io)
end
function Base.show(io::IO, ltcs::LargeTypeString)
# do all the top row glyphs first
for i in 1:length(ltcs.glyphs)
print(io, ltcs.glyphs[i].topleft)
print(io, ltcs.glyphs[i].topcenter)
print(io, ltcs.glyphs[i].topright)
end
println()
for i in 1:length(ltcs.glyphs)
print(io, ltcs.glyphs[i].middleleft)
print(io, ltcs.glyphs[i].middlecenter)
print(io, ltcs.glyphs[i].middleright)
end
println()
for i in 1:length(ltcs.glyphs)
print(io, ltcs.glyphs[i].bottomleft)
print(io, ltcs.glyphs[i].bottomcenter)
print(io, ltcs.glyphs[i].bottomright)
end
println()
end
LTD = Dict(
" " => _buildLargeTypeChar(" "),
"!" => _buildLargeTypeChar(" "),
"\"" => _buildLargeTypeChar(" "),
"#" => _buildLargeTypeChar(" "),
"\$" => _buildLargeTypeChar(""),
"%" => _buildLargeTypeChar(" "),
"&" => _buildLargeTypeChar(" "),
"'" => _buildLargeTypeChar(" "),
"(" => _buildLargeTypeChar(" "),
")" => _buildLargeTypeChar(" "),
"*" => _buildLargeTypeChar(" "),
"+" => _buildLargeTypeChar(" "),
"," => _buildLargeTypeChar(" "),
"-" => _buildLargeTypeChar(" "),
"." => _buildLargeTypeChar(" ▘ "),
"/" => _buildLargeTypeChar(" "),
"0" => _buildLargeTypeChar(" "),
"1" => _buildLargeTypeChar(" "),
"2" => _buildLargeTypeChar(""),
"3" => _buildLargeTypeChar(" "),
"4" => _buildLargeTypeChar(" "),
"5" => _buildLargeTypeChar(""),
"6" => _buildLargeTypeChar(""),
"7" => _buildLargeTypeChar(" "),
"8" => _buildLargeTypeChar(""),
"9" => _buildLargeTypeChar(""),
":" => _buildLargeTypeChar(" ▘ ▘ "),
";" => _buildLargeTypeChar(" "),
"<" => _buildLargeTypeChar(" "),
"=" => _buildLargeTypeChar(" "),
">" => _buildLargeTypeChar(" "),
"?" => _buildLargeTypeChar(" "),
"@" => _buildLargeTypeChar(""),
"A" => _buildLargeTypeChar(" "),
"B" => _buildLargeTypeChar(""),
"C" => _buildLargeTypeChar(" "),
"D" => _buildLargeTypeChar(" "),
"E" => _buildLargeTypeChar(" "),
"F" => _buildLargeTypeChar(" "),
"G" => _buildLargeTypeChar(" "),
"H" => _buildLargeTypeChar(" "),
"I" => _buildLargeTypeChar(" "),
"J" => _buildLargeTypeChar(" "),
"K" => _buildLargeTypeChar(" "),
"L" => _buildLargeTypeChar(" "),
"M" => _buildLargeTypeChar(" "),
"N" => _buildLargeTypeChar(" "),
"O" => _buildLargeTypeChar(" "),
"P" => _buildLargeTypeChar(" "),
"Q" => _buildLargeTypeChar(" "),
"R" => _buildLargeTypeChar(" "),
"S" => _buildLargeTypeChar(""),
"T" => _buildLargeTypeChar(" "),
"U" => _buildLargeTypeChar(" "),
"V" => _buildLargeTypeChar(" "),
"W" => _buildLargeTypeChar(" "),
"X" => _buildLargeTypeChar(" "),
"Y" => _buildLargeTypeChar(" "),
"Z" => _buildLargeTypeChar(""),
"[" => _buildLargeTypeChar(" "),
"\\" => _buildLargeTypeChar(" "),
"]" => _buildLargeTypeChar(" "),
"^" => _buildLargeTypeChar(" "),
"_" => _buildLargeTypeChar(" ▀▀▀"),
"`" => _buildLargeTypeChar(" "),
"a" => _buildLargeTypeChar(" "),
"b" => _buildLargeTypeChar(" "),
"c" => _buildLargeTypeChar(" "),
"d" => _buildLargeTypeChar(" "),
"e" => _buildLargeTypeChar(" "),
"f" => _buildLargeTypeChar(" "),
"g" => _buildLargeTypeChar(" "),
"h" => _buildLargeTypeChar(" "),
"i" => _buildLargeTypeChar(" "),
"j" => _buildLargeTypeChar(" "),
"k" => _buildLargeTypeChar(" "),
"l" => _buildLargeTypeChar(" "),
"m" => _buildLargeTypeChar(" "),
"n" => _buildLargeTypeChar(" "),
"o" => _buildLargeTypeChar(" "),
"p" => _buildLargeTypeChar(" "),
"q" => _buildLargeTypeChar(" "),
"r" => _buildLargeTypeChar(" "),
"s" => _buildLargeTypeChar(" "),
"t" => _buildLargeTypeChar(" "),
"u" => _buildLargeTypeChar(" "),
"v" => _buildLargeTypeChar(" "),
"w" => _buildLargeTypeChar(" "),
"x" => _buildLargeTypeChar(" "),
"y" => _buildLargeTypeChar(" "),
"z" => _buildLargeTypeChar(" "),
"{" => _buildLargeTypeChar(" "),
"|" => _buildLargeTypeChar(" "),
"}" => _buildLargeTypeChar(" "),
"~" => _buildLargeTypeChar(" "),
#"" => _buildLargeTypeChar("▚▚▚▚▚▚▚▚▚"), # not sure
)
| Zalgo | https://github.com/cormullion/Zalgo.jl.git |
|
[
"MIT"
] | 1.3.0 | 8a4aef12779f4becb49be401efc6f57777b5bc2c | code | 2742 | using Zalgo
using Test
s = "Julia is really cool"
@test length(zalgo(s)) > length(s)
@test (zalgo(s, upmarks=1:2) |> length > length(s)) == true
@test (zalgo(s, middlemarks=1:2) |> length > length(s)) == true
@test (zalgo(s, downmarks=1:2) |> length > length(s)) == true
@test (zalgo(s, maxmarks=2) |> length > length(s)) == true
@test length(zalgo(s, maxmarks=0)) == length(s)
@test boldscript(string('B'))[1] |> Int == 120017
@test boldfraktur(string('B'))[1] |> Int == 120173
@test bolditalic(string('B'))[1] |> Int == 119913
@test bolditalicsans(string('B'))[1] |> Int == 120381
@test boldroman(string('B'))[1] |> Int == 119809
@test boldsans(string('B'))[1] |> Int == 120277
@test italic(string('B'))[1] |> Int == 119861
@test italicsans(string('B'))[1] |> Int == 120329
@test sans(string('B'))[1] |> Int == 120225
@test script(string('B'))[1] |> Int == 8492
@test teletype(string('B'))[1] |> Int == 120433
@test upsidedown(string('B'))[1] |> Int == 66578
@test blackboard(string('B'))[1] |> Int == 120121
@test fraktur(string('B'))[1] |> Int == 120069
@test boldscript(string('w'))[1] |> Int == 120064
@test boldfraktur(string('w'))[1] |> Int == 120220
@test bolditalic(string('w'))[1] |> Int == 119960
@test bolditalicsans(string('w'))[1] |> Int == 120428
@test boldroman(string('w'))[1] |> Int == 119856
@test boldsans(string('w'))[1] |> Int == 120324
@test italic(string('w'))[1] |> Int == 119908
@test italicsans(string('w'))[1] |> Int == 120376
@test sans(string('w'))[1] |> Int == 120272
@test script(string('w'))[1] |> Int == 120012
@test teletype(string('w'))[1] |> Int == 120480
@test upsidedown(string('w'))[1] |> Int == 653
@test blackboard(string('w'))[1] |> Int == 120168
@test fraktur(string('w'))[1] |> Int == 120220
@test boxed(string('w'))[1] |> Int == 127366
@test circled(string('w'))[1] |> Int == 9446
@test circled(string('w'), negative=true)[1] |> Int == 127366
@test segmented(string('1'))[1] |> Int == 130033
@test segmented(string('0'))[1] |> Int == 130032
@test length(blackboard("Figure 2")) == 8
@test length(blackboard("Figure 22")) == 9
# two script styles
sc1 = collect(script("Good Morning Sir", roundhand=false))
sc2 = collect(script("Good Morning Sir", roundhand=true))
# they'll only differ by one: sc2 has a VS2 (U+FE01) after every glyph
@test length(setdiff(sc2, sc1)) == 1
@test length(large_type("ABC").glyphs) == 3
#=
# build
for f in (boldscript, boldfraktur, bolditalic, bolditalicsans, boldroman, boldsans, italic, italicsans, sans, script, teletype, upsidedown, blackboard, fraktur)
ch = 'w'
ff = f(string(ch))
fch = Int(Char(ff[1]))
println(" @test ", f, "(string('$(ch)'))[1] |> Int", " == ", fch,)
end
=#
| Zalgo | https://github.com/cormullion/Zalgo.jl.git |
|
[
"MIT"
] | 1.3.0 | 8a4aef12779f4becb49be401efc6f57777b5bc2c | docs | 387 | # Changelog
## [v1.3.0] - 2024-02-25
### Added
- large type
- roundhand script option
### Changed
- up to Documenter 1
### Removed
### Deprecated
###################################################################
## [v1.2.0] - 2021-12-30
### Added
- boxed letters
### Changed
### Removed
### Deprecated
###################################################################
| Zalgo | https://github.com/cormullion/Zalgo.jl.git |
|
[
"MIT"
] | 1.3.0 | 8a4aef12779f4becb49be401efc6f57777b5bc2c | docs | 4008 | 
# Zalgo
| **Documentation** | **Build Status** | **Code Coverage** |
|:--------------------------------------- |:-------------------------------------------|:-------------------------------:|
| [![][docs-stable-img]][docs-stable-url] | [![Build Status][ci-img]][ci-url] | [![][codecov-img]][codecov-url] |
| [![][docs-latest-img]][docs-latest-url] | [![][appveyor-img]][appveyor-url] | |
```julia
using Zalgo
zalgo("Julia approaches")
"J̳̋ͪ︡ų̔l̲̮̲̏̆͋i͖͈̬̭ͭ̄a̬ͯ a̖̖̝ͬͨ͢p̘͓̣̄̕p̯ͥ̍͘r̯ͧ̄o̘̖̮͌̚ã͔̍ͣc̗ͪh̨͗︢e̚s̡̡︡ͮ͐"
```
```julia
zalgo("""
Zalgo is an Internet legend about an ominous entity believed to
cause insanity, death and destruction of the world, similar to the
creature Cthulhu created by H.P. Lovecraft in the 1920s. Zalgo is
often associated with scrambled text on webpages and photos of
people whose eyes and mouth have been covered in black. Or:
Zalgo is something that's coming. It's coming soon.
It has nothing to do with Lovecraft. -
https://knowyourmeme.com/memes/zalgo""")
Z̯͗ͮ̃ͣa̖ͬl̢͍͐͞g̣͖ͩó͓͚͒̋ į̜͞s̖̽͊︢ͩ ȧ̝̙̜ͦn͖̤̪̅︡̑ I̞̎n̤̅t̬̆ͬ͑ḛ͕͒͋̉ṟͤͮͫn̢̰̎̓ͥe̝̯̹︢͟t͓̹̤̋ lͯe͎ͮͮ̕g̬̝̬̲͑̐e͖̪͌̄n̢̘̭͒︢ḑ̹͓ͬ̎ͯ a̺̅b̽͜͡ō̰͔̻̮ų̘̜͊̑̒t̃ͮ︢͜ a̰̩̤ͯ̆͠n̢͖̽̆̃ͣ o͚̬͔̹ͪ͑m̝̀ͧȋ̱̺̎̌n͔̖͈͋ͥ̂o̱ͯ̒u̬̰̥̲ͥs̘̄̆̕ e̙̘̙͗n̝͋ͮ͢t͍̭͗ị͋͟ț̜͑̒̑͞ÿ̤̟̹̄̓ b̩ͮe̼̹͘l̼̟̯̊ị͖̼ͤ︠e͙͔̅v̡̰̖̍ē̙̙̎d̙̤͌ t̳̥ͧͩͭ̕o̠ͨ
c͖ͪͨ͞a︡̌̕͢ȗ̝̺̐ͥ͘s̩̅e̺̹̱͛ i̧̺̙͔̔͝n̻︠̀͡s̬̝͋̄a̳̘̻̋͞n̟̏͞i̪̲͗ͣͨͯt̨̖̔̊̑͒y̘̌, d̡̩̪̄͜ě̪̫a̱͙̞͛t͍h̭̥̔ â͇̘̙̑͊n̥̽d̯͐̐͡ ḑ̠̍ͨ̄͝ẽ͙̰̉̚s̙̗̏̚t̻͝r̘ͧͤ̇ͧú͉̪͝c͓̜͓͒̎ͩt̺̑ï̳̼ͣo͙̞͖̪︡ṅ̩̦ ȯ̩͎̬̼f̧̹̠͉̈ t̳̫͊̄͠h̳͇̠̋͋͞e̤̟͌ͭ̚ ẇ͔o̠ͨ̆͊r͖̭͕ͨͯ͘l̫ͣ͜d̙͕ͧͭ, s̼̔i͙̱͋m̪̞i̭͇̐͛̔l̲̩̏̋̉͝ä̢͉ͤr̖̦̽ t͕͚ͧo̦͛̑ t̮ͫ̑h̭ͯe͎̦̹͐̎͊
c̨̪ͮ̎̍r̲͑̀̀ȩ̝̰ͥ̎̈ä̹̖t̥︢̽̏͟u̠͋ͨ̕͢r̨̻̟͛e̥̎︠̐̓ C͖͌ͧţ̠̈ͦ̔̕ḫ̱̄ͣ̇͛ù̬ͭ͝l̯̝̟̐̊͟h̦́͢͟ú̪̳̜ c͕r̜̼̄ͦ̕e̡̕ḁ͐t͚̮̣͚̽e̥̊̒ͯd͙̮͎͑ͪ̄ b̭͘y̫̗͉︡͞ H̹̮̍.P̫̤ͧ. L̞̳͓̍ͭo͖͍ͥ︢v̘͉ͫ͞e̳͙͌͟c̠ͭ̌r̫ͭͤa̼̜͑͝f̪̜͛͟ṫ̪̤̪ i̮̼̺ͨ͝n̺̭̆͋͘ t̨̠̫̦̐h͉̗ͭͯĕ͍̼̟̮̀ 1920s̭͐̽︠. Z̟̩̃͜a͇͍ͮ͒l̰̻̜︠͡g̭̮ͧo̹ͬ͞ i̭͗ͣs̬͓̄ͯ̃
ȍ̧̯͐f̹̟̭ͮͦt̗ͯe̡̥͚ͮn̬̼̖ͫ a̺̤̲͑s͓̦ͪs̪̠̯̋o̲̔c̨͉ͨi͇͛͋͊a̤︢ͪ︢t̹̱͑e͙̜̮͑̚ḏ͓̭ͦ͛ w̟̠̋i̭͝t͉̬̝̀͛h͎ͬ͛͊ s̯̻̽ͧ͘c͇̹͠͡͝r̥̝̐͞ą̰︡̑m̤̒͋͘b̫͍̲︠̍ͥl͇͈̻͋͊͝e͈͈̺͗̍d̥̠̅͡ t̲̄ͥ̍͜ȩ̜̇͗x̤͋̋̒̅t̺̩̚ o͉̻ͩͫn̢̠̒͞ w̼̔̈̎ͪe͎̝̗͒̍͠b̰̏͐̚p̖͕̎︢ͨ̋ã̭g͍̲̽̃̕͝e̙͈̐̅͗̚s̜̙̼̫͞͡ ā̰̍͢n͉̯͒͞ḑ̦̤͝ p͖̚h̭̽͠o̺̅ͮ͢t͈︢̅ͫ̕ô̞̳̩s̮ͭ͛ͨ o͖͑̇ͥ͒f̥̓
p̤͗ê̢̝͈͓̊o͓̟︢̅p̩̣̉̄ͧ̓l̮︠︢̔̚e̺̩̞ͣ͐ͤ w̰̺̙ͯ̊h͙ͩ̔o̗̪͚͘s͔̙̯ͨ̽̕ḙ̢̐ͨ̓͠ ě̞ͩÿ̝̌e̗︡s̲̮̽ a̽͟͜n̗̦̏͐d͇̖ͤ̕ m̳̋o͖͍̩͐ͬ͟u̬͕̯͗t̢͕̪͐̉h̜ͧ̂ͫ h̨͒a̡͚͚͌̚v̺̙͝e͍ͧ͘͟ b̰̕e̙̲ͫ︠e̞̖̟̯ͭ͠n̰̂͊͝ c̗ͧo̺͒͒v̲̙̫̋̐e̻̮͐͡r̯͋͒͝e̞ͯ︠͢d͉̺̈ i̗͔︡ͩ͞n̩̟͎͔͛̎ b͓͉̳̩ͣ̑ļ͔̝̘ͫ͋ä̩̍̒ċ͙́̏k͊͟. O̟̯̕r͕̖͙̤ͣ͘:
Ź͖̹ā̖̩͌l̮̝ͨg̹̎͢õ̠̇̊͜ i̫̪͑ͯş͕̀ͦ̚͟ s̮̯ͩ̐͝͡ǫ̩ͬ͒m̳ͥ̊ͪ︡e̙͙͌ͬ͟t̗͖ͦ̑͝ẖ̪͇ͫ̎i̜͈ͣ̋͋͘ṋ͍ͦ̇͊̕g̼ͯ͞ ť͙͒͟h͇͈̐ͤͤ̕a͖͐̽̈̎t̜͇ͩ̒͝'s̘̃︢̄ c̼̖̉͡o̹͕̭͗̌͜m̘͈͚̎̓︡i̥̭ͧ̓͢n͌͗̑ĝ̘ͦ̆. I̗͋̎t̲͛'s̞̙̺͇̈ͧ ċ̘̐̏͟͝o̧͗͋ͤ͞m̝̜͕̫ͧ͞i͍͎̞︡͢n̺ͮg̨̹ͦ s̻̲̋̏ͪő̢̫ͧ̀o̡̒̔͋̽n̪͍̕.
I̜̙̅͡t͔͎͒ h̨̻͑͛a̺͌͊̕s̻̰ͩ̓̇ ň͓ͣ͌o͉t̝̽ͦh̠̦͕ͫͥi͚̕͢͞n̖g͎ ṯ︡̈̈̚o͙̍ d̫ͨ͌ȍ̟͈̂ w̥̬̝︡i̯͒͗t͇̋ḩ̖︢͝ L̤̇ͮǫ̰ͣ̎̑̀v̞͙̚ě̩̃c̢̋r̳︡ͨ̐͒a̰̗̻͙͋ḟ̤̚͡t̻̪̳ͩ. -
h͉ͩͭ̚t̊ͨ͡t̳ͮp͉͕͇̂̽s̯͕͇̽̎̕://k̝̤͐̇n̦︠̀͞ȏ̻̭͜ẁ̹̥̗̱y͕ͭ͢o̰͚͟͢u̡͙͖ͨȓ͈︡m͎͊︢̓ę͇︡͑ͤͫm̲͔̟ͫe͉͛͌̚.c̺͉͊͋o̝̲̣̽m̳̳ͣͣͩ/ṃ̬̓͡e͎͍̟̪ͨm̝̻̎̏e͒ͯs̟̮̼̒/z͙̦̝ͤ͒̀a̢͙̓͒l̨̰͉̋g͎̀͐̃͘o̺ͫ͐͠
```
"""
Because this is pretty useless, this package provides some other more useful functions for converting text.
[docs-stable-img]: https://img.shields.io/badge/docs-stable%20release-blue.svg
[docs-stable-url]: https://cormullion.github.io/Zalgo.jl/dev/
[docs-latest-img]: https://img.shields.io/badge/docs-current--master-orange.svg
[docs-latest-url]: https://cormullion.github.io/Zalgo.jl/dev/
[travis-img]: https://travis-ci.com/cormullion/Zalgo.jl.svg?branch=master
[travis-url]: https://travis-ci.com/cormullion/Zalgo.jl
[appveyor-img]: https://ci.appveyor.com/api/projects/status/59hherf65c713iaw/branch/master?svg=true
[appveyor-url]: https://ci.appveyor.com/project/cormullion/zalgo-jl
[codecov-img]: https://codecov.io/gh/cormullion/zalgo.jl/branch/master/graph/badge.svg
[codecov-url]: https://codecov.io/gh/cormullion/zalgo.jl
[ci-img]: https://github.com/cormullion/zalgo.jl/workflows/CI/badge.svg
[ci-url]: https://github.com/cormullion/zalgo.jl/actions?query=workflow%3ACI
| Zalgo | https://github.com/cormullion/Zalgo.jl.git |
|
[
"MIT"
] | 1.3.0 | 8a4aef12779f4becb49be401efc6f57777b5bc2c | docs | 43 | # Index
```@autodocs
Modules = [Zalgo]
``` | Zalgo | https://github.com/cormullion/Zalgo.jl.git |
|
[
"MIT"
] | 1.3.0 | 8a4aef12779f4becb49be401efc6f57777b5bc2c | docs | 3321 | # Zalgo
## Z̝̫͈̝ͩ͒̔͐̑̆̔︠̈a̜̙̜̯͇̳̱ͯͫͦ͑ͦ͘͟l͙͙̻̱͌ͮ̐́ͮͯ͟͢g͚̋̈̎͋̎̒̐ͮͯͦò̻̉\n
Zalgo text is digital text that has been modified by the addition of combining characters, the Unicode symbols more usually employed to position diacritics above and below glyphs.
“Zalgo” was named for a 2004 Internet meme that ascribed it to the influence of an eldritch deity. There’s no official connection with H. P. Lovecraft’s Cthulhu.
Use the `zalgo` function to add diacritics to a string. The options let you control how many diacritics are used. For maximum degeneracy, set `maxmarks` to a large number.
```@example
using Zalgo
zalgo("Julia is cool", maxmarks=100)
```
## Utilities
!!! note
These utility functions use glyphs from the current font. Not many fonts contain all the necessary glyphs!
Because this package is useless — and occasionally bad, because it can cause some applications to misbehave — it also provides some utility functions to justify its existence.
The following functions convert the input string to equivalent characters that are to be found in the eldritch lexicon of the Unicode realm, where arcane glyphs and cryptic symbols abound.
```julia
blackboard("Hello World") # double-struck or 'blackboard' style
boldfraktur("Hello World") # bold Fraktur (black letter)
bolditalic("Hello World") # bold italic
bolditalicsans("Hello World") # bold italic sans-serif
boldroman("Hello World") # bold roman
boldsans("Hello World") # bold sans-serif
boldscript("Hello World") # bold script-style
fraktur("Hello World") # Fraktur (black letter)
large_type("Hello World") # Large Type (9 segments per glyph)
italic("Hello World") # italic
italicsans("Hello World") # italic sans-serif
sans("Hello World") # sans-serif
script("Hello World") # script
teletype("Hello World") # monospaced 'teletype'
upsidedown("Hello World") # might look like it's flipped upside down
circled("HELLO WORLD") # letters in circles
boxed("hello world") # letters in boxes
segmented("0123456789") # digits converted to 7-segment 'LED"-type display
```

You can see what's going on using:
```julia-repl
julia-1.10> collect(blackboard("Hello World"))
11-element Vector{Char}:
'ℍ': Unicode U+210D (category Lu: Letter, uppercase)
'𝕖': Unicode U+1D556 (category Ll: Letter, lowercase)
'𝕝': Unicode U+1D55D (category Ll: Letter, lowercase)
'𝕝': Unicode U+1D55D (category Ll: Letter, lowercase)
'𝕠': Unicode U+1D560 (category Ll: Letter, lowercase)
' ': ASCII/Unicode U+0020 (category Zs: Separator, space)
'𝕎': Unicode U+1D54E (category Lu: Letter, uppercase)
'𝕠': Unicode U+1D560 (category Ll: Letter, lowercase)
'𝕣': Unicode U+1D563 (category Ll: Letter, lowercase)
'𝕝': Unicode U+1D55D (category Ll: Letter, lowercase)
'𝕕': Unicode U+1D555 (category Ll: Letter, lowercase)
```
### "Large Type"
Unicode 16 defines a set of glyphs that can be combined in a 3 × 3 grid to build larger letters.

### Script styles
There are two mathematical script styles:

| Zalgo | https://github.com/cormullion/Zalgo.jl.git |
|
[
"MIT"
] | 0.1.2 | 504869786fe9e38cf52cbbac43e68f2f102e13e3 | code | 12536 | module EasyRanges
export
@range,
@reverse_range
using Base: OneTo
"""
EasyRanges.ContiguousRange
is an alias for `AbstractUnitRange{Int}`, the type of ranges in an
[`EasyRanges.CartesianBox`](@ref).
"""
const ContiguousRange = AbstractUnitRange{Int}
"""
EasyRanges.CartesianBox{N}
is an alias for `CartesianIndices{N}` but restricted to have contiguous
Cartesian indices. Since Julia 1.6, `CartesianIndices` may have non-unit step,
hence non-contiguous indices.
"""
const CartesianBox{N} = CartesianIndices{N,<:NTuple{N,ContiguousRange}}
"""
EasyRanges.StretchBy(δ) -> obj
yields a callable object `obj` such that `obj(x)` yields `x` stretched by
offset `δ`.
""" StretchBy
"""
EasyRanges.ShrinkBy(δ) -> obj
yields a callable object `obj` such that `obj(x)` yields `x` shrunk by offset
`δ`.
""" ShrinkBy
"""
@range expr
rewrites range expression `expr` with extended syntax. The result is an
`Int`-valued index range (possibly Cartesian) where indices are running in the
forward direction (with a positive step).
"""
macro range(ex::Expr)
esc(Expr(:call, :(EasyRanges.forward), rewrite!(ex)))
end
"""
@reverse_range expr
rewrites range expression `expr` with extended syntax. The result is an
`Int`-valued index range (possibly Cartesian) where indices are running in the
reverse direction (with a negative step).
"""
macro reverse_range(ex::Expr)
esc(Expr(:call, :(EasyRanges.backward), rewrite!(ex)))
end
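# Examples of what these macros yield (illustrative, not doctests):
#   @range (1:10) ∩ (3:12)   -> 3:10
#   @range (1:10) + 2        -> 3:12
#   @range 5 ± 2             -> 3:7
#   @reverse_range 1:10      -> 10:-1:1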
rewrite!(x) = x # leave anything else untouched
function rewrite!(ex::Expr)
if ex.head === :call
if ex.args[1] === :(+)
ex.args[1] = :(EasyRanges.plus)
elseif ex.args[1] === :(-)
ex.args[1] = :(EasyRanges.minus)
elseif ex.args[1] === :(∩) || ex.args[1] === :(intersect) || ex.args[1] == :(Base.intersect)
ex.args[1] = :(EasyRanges.cap)
elseif ex.args[1] === :(±)
ex.args[1] = :(EasyRanges.stretch)
elseif ex.args[1] === :(∓)
ex.args[1] = :(EasyRanges.shrink)
end
for i in 2:length(ex.args)
rewrite!(ex.args[i])
end
end
return ex
end
"""
EasyRanges.forward(R)
yields an object which contains the same (Cartesian) indices as `R` but with
positive step(s) and `Int`-valued. Arguments of other types are returned
unchanged.
"""
forward(a) = a
forward(a::AbstractUnitRange{Int}) = a
forward(a::AbstractUnitRange{<:Integer}) = to_int(a)
function forward(a::OrdinalRange{<:Integer,<:Integer})
first_a, step_a, last_a = first_step_last(a)
return step_a ≥ 0 ? (first_a:step_a:last_a) : (last_a:-step_a:first_a)
end
forward(a::CartesianIndices) =
isa(a, CartesianBox) ? a : CartesianIndices(map(forward, ranges(a)))
"""
EasyRanges.backward(R)
yields an object which contains the same (Cartesian) indices as `R` but with
negative step(s) and `Int`-valued. Arguments of other types are returned
unchanged.
"""
backward(a) = a
function backward(a::AbstractUnitRange{<:Integer})
first_a, last_a = first_last(a)
return last_a:-1:first_a
end
function backward(a::OrdinalRange{<:Integer,<:Integer})
first_a, step_a, last_a = first_step_last(a)
return step_a ≤ 0 ? (first_a:step_a:last_a) : (last_a:-step_a:first_a)
end
backward(a::CartesianIndices) = CartesianIndices(map(backward, ranges(a)))
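# For example (illustrative): backward(2:6) yields 6:-1:2 and
# backward(1:2:9) yields 9:-2:1.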
"""
EasyRanges.plus(a...)
yields the result of expression `+a`, `a + b`, `a + b + c...` in
[`@range`](@ref) macro.
""" plus
# Use ordinary + by default and deal with multiple arguments.
plus(a) = +a
plus(a, b) = a + b
@inline plus(a, b, c...) = plus(plus(a, b), c...)
# Unary plus just converts to `Int`-valued object.
plus(a::Int) = a
plus(a::Integer) = to_int(a)
plus(a::AbstractUnitRange{Int}) = a
plus(a::AbstractUnitRange{<:Integer}) = to_int(a)
plus(a::OrdinalRange{<:Integer,<:Integer}) = forward(a)
plus(a::CartesianIndex) = a
plus(a::CartesianIndices) = forward(a)
# Binary plus.
plus(a::Integer, b::Integer) = to_int(a) + to_int(b)
function plus(a::AbstractUnitRange{<:Integer}, b::Integer)
first_a, last_a = first_last(a)
int_b = to_int(b)
return (first_a + int_b):(last_a + int_b)
end
plus(a::Integer, b::AbstractUnitRange{<:Integer}) = plus(b, a)
function plus(a::OrdinalRange{<:Integer,<:Integer}, b::Integer)
first_a, step_a, last_a = first_step_last(a)
int_b = to_int(b)
if step_a ≥ 0
return (first_a + int_b):(step_a):(last_a + int_b)
else
return (last_a + int_b):(-step_a):(first_a + int_b)
end
end
plus(a::Integer, b::OrdinalRange{<:Integer,<:Integer}) = plus(b, a)
"""
EasyRanges.minus(a...)
yields the result of expression `-a` and `a - b` in [`@range`](@ref) macro.
""" minus
# Use ordinary - by default.
minus(a) = -a
minus(a, b) = a - b
# Unary minus yields positive step sign.
minus(a::Integer) = -to_int(a)
function minus(a::AbstractUnitRange{<:Integer})
first_a, last_a = first_last(a)
return (-last_a):(-first_a)
end
function minus(a::OrdinalRange{<:Integer,<:Integer})
first_a, step_a, last_a = first_step_last(a)
if step_a ≥ 0
return (-last_a):(step_a):(-first_a)
else
return (-first_a):(-step_a):(-last_a)
end
end
minus(a::CartesianIndex) = -a
minus(a::CartesianIndices) = CartesianIndices(map(minus, ranges(a)))
# Binary minus.
minus(a::Integer, b::Integer) = to_int(a) - to_int(b)
function minus(a::AbstractUnitRange{<:Integer}, b::Integer)
first_a, last_a = first_last(a)
int_b = to_int(b)
return (first_a - int_b):(last_a - int_b)
end
function minus(a::Integer, b::AbstractUnitRange{<:Integer})
int_a = to_int(a)
first_b, last_b = first_last(b)
return (int_a - last_b):(int_a - first_b)
end
function minus(a::OrdinalRange{<:Integer,<:Integer}, b::Integer)
first_a, step_a, last_a = first_step_last(a)
int_b = to_int(b)
if step_a ≥ 0
return (first_a - int_b):(step_a):(last_a - int_b)
else
return (last_a - int_b):(-step_a):(first_a - int_b)
end
end
function minus(a::Integer, b::OrdinalRange{<:Integer,<:Integer})
int_a = to_int(a)
first_b, step_b, last_b = first_step_last(b)
if step_b ≥ 0
return (int_a - last_b):(step_b):(int_a - first_b)
else
return (int_a - first_b):(-step_b):(int_a - last_b)
end
end
"""
EasyRanges.cap(a...)
yields the result of expression `a ∩ b` in [`@range`](@ref) macro.
"""
cap(a, b) = intersect(a, b) # use default behavior
cap(a::Integer, b::Integer) = cap(to_int(a), to_int(b))
cap(a::Int, b::Int) = ifelse(a === b, a:a, 1:0)
cap(a::Integer, b::AbstractUnitRange{<:Integer}) = cap(b, a)
function cap(a::AbstractUnitRange{<:Integer}, b::Integer)
first_a, last_a = first_last(a)
int_b = to_int(b)
ifelse((first_a ≤ int_b)&(int_b ≤ last_a), int_b:int_b, 1:0)
end
cap(a::OneTo, b::OneTo) = OneTo{Int}(min(to_int(a.stop), to_int(b.stop)))
function cap(a::AbstractUnitRange{<:Integer},
b::AbstractUnitRange{<:Integer})
first_a, last_a = first_last(a)
first_b, last_b = first_last(b)
return max(first_a, first_b):min(last_a, last_b)
end
function cap(a::OrdinalRange{<:Integer,<:Integer},
b::OrdinalRange{<:Integer,<:Integer})
return forward(a) ∩ forward(b) # FIXME: Optimize?
end
cap(a::CartesianIndex{N}, b::CartesianIndex{N}) where {N} =
CartesianIndices(map(cap, Tuple(a), Tuple(b)))
# Combine CartesianIndices and CartesianIndices or CartesianIndex.
for f in (:plus, :minus, :cap)
@eval begin
$f(a::CartesianIndices{N}, b::CartesianIndex{N}) where {N} =
CartesianIndices(map($f, ranges(a), Tuple(b)))
$f(a::CartesianIndex{N}, b::CartesianIndices{N}) where {N} =
CartesianIndices(map($f, Tuple(a), ranges(b)))
end
end
cap(a::CartesianIndices{N}, b::CartesianIndices{N}) where {N} =
CartesianIndices(map(cap, ranges(a), ranges(b)))
"""
EasyRanges.stretch(a, b)
yields the result of stretching `a` by amount `b`. This is equivalent to the
expression `a ± b` in [`@range`](@ref) macro.
"""
stretch(a::Int, b::Int) = (a - b):(a + b)
function stretch(a::AbstractUnitRange{<:Integer}, b::Integer)
first_a, last_a = first_last(a)
int_b = to_int(b)
return (first_a - int_b):(last_a + int_b)
end
function stretch(a::OrdinalRange{<:Integer}, b::Integer)
first_a, step_a, last_a = first_step_last(a)
int_b = to_int(b)
(int_b % step_a) == 0 || throw(ArgumentError("stretch must be multiple of the step"))
if step_a ≥ 0
return (first_a - int_b):step_a:(last_a + int_b)
else
return (last_a - int_b):(-step_a):(first_a + int_b)
end
end
"""
EasyRanges.shrink(a, b)
yields the result of shrinking `a` by amount `b`. This is equivalent to the
expression `a ∓ b` in [`@range`](@ref) macro.
"""
shrink(a::Int, b::Int) = (a + b):(a - b)
function shrink(a::AbstractUnitRange{<:Integer}, b::Integer)
first_a, last_a = first_last(a)
int_b = to_int(b)
return (first_a + int_b):(last_a - int_b)
end
function shrink(a::OrdinalRange{<:Integer}, b::Integer)
first_a, step_a, last_a = first_step_last(a)
int_b = to_int(b)
(int_b % step_a) == 0 || throw(ArgumentError("shrink must be multiple of the step"))
if step_a ≥ 0
return (first_a + int_b):step_a:(last_a - int_b)
else
return (last_a + int_b):(-step_a):(first_a - int_b)
end
end
for (f, s) in ((:stretch, :StretchBy),
(:shrink, :ShrinkBy))
@eval begin
struct $s <: Function
δ::Int # left operand
end
(obj::$s)(x::Integer) = $f(x, obj.δ)
(obj::$s)(x::OrdinalRange{<:Integer,<:Integer}) = $f(x, obj.δ)
$f(a::Integer, b::Integer) = $f(to_int(a), to_int(b))
$f(a::CartesianIndices{N}, b::CartesianIndex{N}) where {N} =
CartesianIndices(map($f, ranges(a), Tuple(b)))
$f(a::CartesianIndices{N}, b::NTuple{N,Integer}) where {N} =
CartesianIndices(map($f, ranges(a), b))
$f(a::CartesianIndices, b::Integer) =
CartesianIndices(map($s(b), ranges(a)))
end
    # A Cartesian index can be stretched, not shrunk.
if f === :stretch
@eval begin
$f(a::CartesianIndex{N}, b::CartesianIndex{N}) where {N} =
CartesianIndices(map($f, Tuple(a), Tuple(b)))
$f(a::CartesianIndex{N}, b::NTuple{N,Integer}) where {N} =
CartesianIndices(map($f, Tuple(a), b))
$f(a::CartesianIndex, b::Integer) =
CartesianIndices(map($s(b), Tuple(a)))
end
end
end
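# For example (illustrative):
#   stretch(0:10, 2)                 -> -2:12
#   shrink(0:10, 2)                  -> 2:8
#   stretch(CartesianIndex(3,4), 1)  -> CartesianIndices((2:4, 3:5))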
"""
EasyRanges.ranges(R)
yields the list of ranges in Cartesian indices `R`.
"""
ranges(R::CartesianIndices) = getfield(R, :indices)
"""
EasyRanges.first_last(x) -> (first_x, last_x)
yields the 2-tuple `(first(x), last(x))` converted to be `Int`-valued.
"""
first_last(x::AbstractUnitRange{<:Integer}) =
(to_int(first(x)), to_int(last(x)))
first_last(x::CartesianIndices) = begin
flag = true
for r in ranges(x)
flag &= (step(r) == 1)
end
flag || throw(ArgumentError("Cartesian ranges have non-unit step"))
return (CartesianIndex(map(first, ranges(x))),
CartesianIndex(map(last, ranges(x))))
end
"""
EasyRanges.first_step_last(x) -> (first_x, step_x, last_x)
yields the 3-tuple `(first(x), step(x), last(x))` converted to be `Int`-valued.
"""
first_step_last(x::AbstractUnitRange{<:Integer}) =
(to_int(first(x)), 1, to_int(last(x)))
first_step_last(x::OrdinalRange{<:Integer,<:Integer}) =
(to_int(first(x)), to_int(step(x)), to_int(last(x)))
first_step_last(x::CartesianIndices) =
(CartesianIndex(map(first, ranges(x))),
CartesianIndex(map(step, ranges(x))),
CartesianIndex(map(last, ranges(x))))
"""
EasyRanges.to_int(x)
yields an `Int`-valued equivalent of `x`.
"""
to_int(x::Int) = x
to_int(x::Integer) = to_type(Int, x)
to_int(x::OneTo{Int}) = x
to_int(x::OneTo) = OneTo{Int}(x.stop)
to_int(x::AbstractUnitRange{Int}) = x
to_int(x::AbstractUnitRange{<:Integer}) = to_int(first(x)):to_int(last(x))
to_int(x::OrdinalRange{Int,Int}) = x
to_int(x::OrdinalRange{<:Integer}) =
to_int(first(x)):to_int(step(x)):to_int(last(x))
# Cartesian indices are already `Int`-valued.
to_int(x::CartesianIndex) = x
to_int(x::CartesianIndices) = x
to_int(x::Tuple{Vararg{Int}}) = x
to_int(x::Tuple{Vararg{Integer}}) = map(to_int, x)
"""
EasyRanges.to_type(T, x)
yields `x` surely converted to type `T`.
"""
to_type(::Type{T}, x::T) where {T} = x
to_type(::Type{T}, x) where {T} = convert(T, x)::T
end
| EasyRanges | https://github.com/emmt/EasyRanges.jl.git |
|
[
"MIT"
] | 0.1.2 | 504869786fe9e38cf52cbbac43e68f2f102e13e3 | code | 4474 | module Bench
using EasyRanges
using BenchmarkTools, Test
const try_turbo = false # NOTE: @turbo code broken for Cartesian indices
@static if try_turbo
using LoopVectorization
end
test1_jl(A, B, C) = A ∩ (B .+ C)
test1(A, B, C) = @range A ∩ (B + C)
test2_jl(A, B, C) = A ∩ (B .- C)
test2(A, B, C) = @range A ∩ (B - C)
# Discrete correlation.
function correlate_jl!(dst, A, B)
T = promote_type(eltype(A), eltype(B))
@inbounds for i ∈ CartesianIndices(dst)
s = zero(T)
@simd for j ∈ CartesianIndices(A) ∩ (i .+ CartesianIndices(B))
s += A[j]*B[j-i]
end
dst[i] = s
end
return dst
end
function correlate!(dst, A, B)
T = promote_type(eltype(A), eltype(B))
@inbounds for i ∈ CartesianIndices(dst)
s = zero(T)
@simd for j ∈ @range CartesianIndices(A) ∩ (i + CartesianIndices(B))
s += A[j]*B[j-i]
end
dst[i] = s
end
return dst
end
@static if try_turbo
function correlate_turbo!(dst, A, B)
T = promote_type(eltype(A), eltype(B))
@inbounds for i ∈ CartesianIndices(dst)
s = zero(T)
@turbo for j ∈ @range CartesianIndices(A) ∩ (i + CartesianIndices(B))
s += A[j]*B[j-i]
end
dst[i] = s
end
return dst
end
end
# Discrete convolution.
function convolve_jl!(dst, A, B)
T = promote_type(eltype(A), eltype(B))
@inbounds for i ∈ CartesianIndices(dst)
s = zero(T)
@simd for j ∈ CartesianIndices(A) ∩ (i .- CartesianIndices(B))
s += A[j]*B[i-j]
end
dst[i] = s
end
return dst
end
function convolve!(dst, A, B)
T = promote_type(eltype(A), eltype(B))
@inbounds for i ∈ CartesianIndices(dst)
s = zero(T)
@simd for j ∈ @range CartesianIndices(A) ∩ (i - CartesianIndices(B))
s += A[j]*B[i-j]
end
dst[i] = s
end
return dst
end
@static if try_turbo
function convolve_turbo!(dst, A, B)
T = promote_type(eltype(A), eltype(B))
@inbounds for i ∈ CartesianIndices(dst)
s = zero(T)
@turbo for j ∈ @range CartesianIndices(A) ∩ (i - CartesianIndices(B))
s += A[j]*B[i-j]
end
dst[i] = s
end
return dst
end
end
A = CartesianIndices((30,40,50));
B = CartesianIndices((3,4,5));
for I ∈ (CartesianIndex(1,2,3), #= CartesianIndex(10,20,30) =#)
println("Testing with I = $I")
print(" A ∩ (B .+ I)"); @btime test1_jl($A, $B, $I);
print("@range A ∩ (B + I)"); @btime test1($A, $B, $I);
print(" A ∩ (I .+ B)"); @btime test1_jl($A, $I, $B);
print("@range A ∩ (I + B)"); @btime test1($A, $I, $B);
print(" A ∩ (B .- I)"); @btime test2_jl($A, $B, $I);
print("@range A ∩ (B - I)"); @btime test2($A, $B, $I);
print(" A ∩ (I .- B)"); @btime test2_jl($A, $I, $B);
print("@range A ∩ (I - B)"); @btime test2($A, $I, $B);
end
T = Float32
A = rand(T, (8,8))
B = rand(T, (32,32))
C1 = similar(B)
C2 = similar(B)
x = '×'
println("\nTesting correlation of $(join(size(A),x)) and $(join(size(B),x)) arrays")
print("base Julia with @simd "); @btime correlate_jl!($C1, $A, $B);
print("using @range and @simd "); @btime correlate!($C2, $A, $B);
@test C1 ≈ C2
if try_turbo
print("using @range and @turbo"); @btime correlate_turbo!($C2, $A, $B);
@test C1 ≈ C2
end
println("\nTesting correlation of $(join(size(B),x)) and $(join(size(A),x)) arrays")
print("base Julia with @simd "); @btime correlate_jl!($C1, $B, $A);
print("using @range and @simd "); @btime correlate!($C2, $B, $A);
@test C1 ≈ C2
if try_turbo
print("using @range and @turbo"); @btime correlate_turbo!($C2, $B, $A);
@test C1 ≈ C2
end
println("\nTesting convolution of $(join(size(A),x)) and $(join(size(B),x)) arrays")
print("base Julia with @simd "); @btime convolve_jl!($C1, $A, $B);
print("using @range and @simd "); @btime convolve!($C2, $A, $B);
@test C1 ≈ C2
if try_turbo
print("using @range and @turbo"); @btime convolve_turbo!($C2, $A, $B);
@test C1 ≈ C2
end
println("\nTesting convolution of $(join(size(B),x)) and $(join(size(A),x)) arrays")
print("base Julia with @simd "); @btime convolve_jl!($C1, $B, $A);
print("using @range and @simd "); @btime convolve!($C2, $B, $A);
@test C1 ≈ C2
if try_turbo
print("using @range and @turbo"); @btime convolve_turbo!($C2, $B, $A);
@test C1 ≈ C2
end
end # module
| EasyRanges | https://github.com/emmt/EasyRanges.jl.git |
|
[
"MIT"
] | 0.1.2 | 504869786fe9e38cf52cbbac43e68f2f102e13e3 | code | 12414 | module TestingEasyRanges
using Test
using Base: OneTo
using EasyRanges
using EasyRanges:
forward, backward, ranges, to_type, to_int, stretch, shrink,
first_last, first_step_last, plus, minus, cap
# A bit of type-piracy for more readable error messages.
Base.show(io::IO, x::CartesianIndices) =
print(io, "CartesianIndices($(x.indices))")
# CartesianIndices with non-unit ranges appear in Julia 1.6
const CARTESIAN_INDICES_MAY_HAVE_NON_UNIT_RANGES = (VERSION ≥ v"1.6")
@testset "EasyRanges" begin
# to_type
let A = [-1,0,2]
@test to_type(Array{Int}, A) === A
@test to_type(Array{Int16}, A) isa Array{Int16}
@test to_type(Array{Int16}, A) == A
end
# to_int
@test to_int(5) === 5
@test to_int(UInt16(7)) === 7
@test to_int(OneTo{Int}(8)) === OneTo(8)
@test to_int(OneTo{UInt16}(3)) === OneTo(3)
@test to_int(3:8) === 3:8
@test to_int(UInt16(3):UInt16(8)) === 3:8
@test to_int(8:-3:-1) === 8:-3:-1
@test to_int(Int16(8):Int16(-3):Int16(-1)) === 8:-3:-1
@test to_int(CartesianIndex(-1,2,3,4)) === CartesianIndex(-1,2,3,4)
@test to_int(CartesianIndices((Int16(-1):Int16(3),Int16(2):Int16(8)))) === CartesianIndices((-1:3,2:8))
@test to_int((-1,3,2)) === (-1,3,2)
@test to_int((Int16(-1),Int16(3),Int16(2))) === (-1,3,2)
# first_last and first_step_last
@test first_last(Int16(-4):Int16(11)) == (-4, 11)
@test_throws MethodError first_last(-4:2:11)
@test first_step_last(Int16(-4):Int16(11)) === (-4,1,11)
@test first_step_last(Int16(-4):Int16(2):Int16(11)) === (-4,2,10)
@test first_last(CartesianIndices((2:6, 3:5))) === (CartesianIndex(2,3), CartesianIndex(6,5))
@test first_step_last(CartesianIndices((2:6, 3:5))) === (CartesianIndex(2,3), CartesianIndex(1,1), CartesianIndex(6,5))
if CARTESIAN_INDICES_MAY_HAVE_NON_UNIT_RANGES
@test first_last(CartesianIndices((2:1:6, 3:1:5))) === (CartesianIndex(2,3), CartesianIndex(6,5))
@test_throws ArgumentError first_last(CartesianIndices((2:1:6, 3:2:5)))
@test first_step_last(CartesianIndices((2:6, 3:2:7))) === (CartesianIndex(2,3), CartesianIndex(1,2), CartesianIndex(6,7))
end
# Check normalization of ranges.
@test forward(π) === π
@test forward(OneTo(6)) === OneTo{Int}(6)
@test forward(OneTo{Int16}(6)) === OneTo{Int}(6)
@test forward(2:7) === 2:7
@test forward(Int16(2):Int16(7)) === 2:7
@test forward(-2:3:11) === -2:3:11
@test forward(Int16(-2):Int16(3):Int16(11)) === -2:3:11
@test forward(11:-3:-2) === -1:3:11
@test forward(Int16(11):Int16(-3):Int16(-2)) === -1:3:11
# backward
@test backward(π) === π
@test backward(OneTo(5)) === 5:-1:1
@test backward(2:3:12) === 11:-3:2
@test backward(11:-3:2) === 11:-3:2
# unary plus
@test plus(1.0) === 1.0
@test plus(7) === 7
@test plus(Int16(7)) === 7
@test plus(2:8) === 2:8
@test plus(Int16(2):Int16(8)) === 2:8
@test plus(2:3:12) === 2:3:11
@test plus(Int16(2):Int16(3):Int16(12)) === 2:3:11
@test plus(12:-4:-1) === 0:4:12
@test plus(CartesianIndex(-1,2,3,4)) === CartesianIndex(-1,2,3,4)
@test plus(CartesianIndices((4:8,2:9))) === CartesianIndices((4:8,2:9))
if CARTESIAN_INDICES_MAY_HAVE_NON_UNIT_RANGES
@test plus(CartesianIndices((8:-1:4,2:3:9))) === CartesianIndices((4:1:8,2:3:8))
end
# binary plus
@test plus(2, π) === (2 + π)
@test plus(3, 8) === 11
@test plus(Int16(3), Int16(8)) === 11
@test plus(1:4, 2) === 3:6
@test plus(2, 1:4) === 3:6
@test plus(1:2:8, 3) === 4:2:10
@test plus(3, 1:2:8) === 4:2:10
@test plus(8:-2:1, 3) === 5:2:11
@test plus(3, 8:-2:1) === 5:2:11
@test plus(CartesianIndices(((4:8, 2:9))), CartesianIndex(-1,2)) === CartesianIndices(((3:7, 4:11)))
@test (@range CartesianIndices(((4:8, 2:9))) + CartesianIndex(-1,2)) === CartesianIndices(((3:7, 4:11)))
@test plus(CartesianIndex(-1,2), CartesianIndices(((4:8, 2:9)))) === CartesianIndices(((3:7, 4:11)))
@test (@range CartesianIndex(-1,2) + CartesianIndices(((4:8, 2:9)))) === CartesianIndices(((3:7, 4:11)))
# plus with more arguments
@test plus(1.0, 2, π, sqrt(2)) === (1.0 + 2 + π + sqrt(2))
# unary minus
@test minus(1.0) === -1.0
@test minus(7) === -7
@test minus(Int16(7)) === -7
@test minus(2:8) === -8:-2
@test minus(Int16(2):Int16(8)) === -8:-2
@test minus(2:3:12) === -11:3:-2
@test minus(Int16(2):Int16(3):Int16(12)) === -11:3:-2
@test minus(12:-4:-1) === -12:4:0
@test minus(CartesianIndex(-1,2,3,4)) === CartesianIndex(1,-2,-3,-4)
@test minus(CartesianIndices((4:8,2:9))) === CartesianIndices((-8:-4,-9:-2))
if CARTESIAN_INDICES_MAY_HAVE_NON_UNIT_RANGES
@test minus(CartesianIndices((8:-1:3,2:3:9))) === CartesianIndices((-8:1:-3,-8:3:-2))
end
# binary minus
@test minus(2, π) === (2 - π)
@test minus(3, 8) === -5
@test minus(Int16(3), Int16(8)) === -5
@test minus(1:4, 2) === -1:2
@test minus(2, 1:4) === -2:1
@test minus(1:2:8, 3) === -2:2:4
@test minus(3, 0:2:9) === -5:2:3
@test minus(8:-2:1, 3) === -1:2:5
@test minus(3, 8:-2:1) === -5:2:1
@test minus(CartesianIndices(((4:8, 2:9))), CartesianIndex(-1,2)) === CartesianIndices(((5:9, 0:7)))
@test (@range CartesianIndices(((4:8, 2:9))) - CartesianIndex(-1,2)) === CartesianIndices(((5:9, 0:7)))
@test minus(CartesianIndex(-1,2), CartesianIndices(((4:8, 2:9)))) === CartesianIndices(((-9:-5, -7:0)))
@test (@range CartesianIndex(-1,2) - CartesianIndices(((4:8, 2:9)))) === CartesianIndices(((-9:-5, -7:0)))
# intersection
@test cap([1], 1) == [1]
@test cap(-7, -7) === -7:-7
@test cap(2, 0) === 1:0
@test cap(Int16(2), Int16(0)) === 1:0
@test cap(2, 0:6) === 2:2
@test cap(0:6, 2) === 2:2
@test cap(-1, 0:6) === 1:0
@test cap(0:6, -1) === 1:0
@test cap(OneTo(5), OneTo(7)) === OneTo(5)
@test cap(OneTo(9), OneTo(7)) === OneTo(7)
@test cap(1:7, 2:5) === 2:5
@test cap(2:5, 1:7) === 2:5
@test cap(1:7, 0:5) === 1:5
@test cap(0:5, 1:7) === 1:5
@test cap(1:7, 2:8) === 2:7
@test cap(2:8, 1:7) === 2:7
@test cap(2:3:9, 1:1:7) === 2:3:5
@test cap(2:3:14, 1:2:12) === 5:6:11
@test cap(14:-3:2, 1:2:12) === 5:6:11
@test (@range [1] ∩ 1) == [1]
@test (@range -7 ∩ -7) === -7:-7
@test (@range 2 ∩ 0) === 1:0
@test (@range Int16(2) ∩ Int16(0)) === 1:0
@test (@range 2 ∩ (0:6)) === 2:2
@test (@range (0:6) ∩ 2) === 2:2
@test (@range -1 ∩ (0:6)) === 1:0
@test (@range (0:6) ∩ -1) === 1:0
@test (@range OneTo(5) ∩ OneTo(7)) === OneTo(5)
@test (@range OneTo(9) ∩ OneTo(7)) === OneTo(7)
@test (@range (1:7) ∩ (2:5)) === 2:5
@test (@range (2:5) ∩ (1:7)) === 2:5
@test (@range (1:7) ∩ (0:5)) === 1:5
@test (@range (0:5) ∩ (1:7)) === 1:5
@test (@range (1:7) ∩ (2:8)) === 2:7
@test (@range (2:8) ∩ (1:7)) === 2:7
@test (@range (2:3:9) ∩ (1:1:7)) === 2:3:5
@test (@range (2:3:14) ∩ (1:2:12)) === 5:6:11
@test (@range (14:-3:2) ∩ (1:2:12)) === 5:6:11
@test (@range intersect(14:-3:2, 1:2:12)) === 5:6:11
@test (@range Base.intersect(14:-3:2, 1:2:12)) === 5:6:11
@test cap(CartesianIndex(3,4), CartesianIndex(3,4)) === CartesianIndices((3:3,4:4))
@test (@range CartesianIndex(3,4) ∩ CartesianIndex(3,4)) === CartesianIndices((3:3,4:4))
# Intersection of CartesianIndices and CartesianIndex
@test cap(CartesianIndices((2:4, 5:9)), CartesianIndex(3,5)) === CartesianIndices((3:3, 5:5))
@test (@range CartesianIndices((2:4, 5:9)) ∩ CartesianIndex(3,5)) === CartesianIndices((3:3, 5:5))
@test cap(CartesianIndices((2:4, 5:9)), CartesianIndex(1,5)) === CartesianIndices((1:0, 5:5))
@test (@range CartesianIndices((2:4, 5:9)) ∩ CartesianIndex(1,5)) === CartesianIndices((1:0, 5:5))
@test cap(CartesianIndices((2:4, 5:9)), CartesianIndex(2,3)) === CartesianIndices((2:2, 1:0))
@test (@range CartesianIndices((2:4, 5:9)) ∩ CartesianIndex(2,3)) === CartesianIndices((2:2, 1:0))
# Intersection of CartesianIndices
@test cap(CartesianIndices((2:4, 5:9)), CartesianIndices((0:3, 6:10))) === CartesianIndices((2:3, 6:9))
@test (@range CartesianIndices((2:4, 5:9)) ∩ CartesianIndices((0:3, 6:10))) === CartesianIndices((2:3, 6:9))
# Streching.
@test stretch(7, 11) === -4:18
@test stretch(Int16(7), Int16(11)) === -4:18
@test stretch(OneTo(6), 3) === -2:9
@test stretch(OneTo{Int16}(6), Int16(3)) === -2:9
@test stretch(7, 3) === 4:10
@test stretch(7, Int16(3)) === 4:10
@test_throws ArgumentError stretch(1:3:9, 2)
@test_throws ArgumentError @range (1:3:9) ± 2
@test stretch(1:3:14, 6) === -5:3:19
@test (@range (1:3:14) ± 6) === -5:3:19
@test stretch(15:-3:-1, 6) === -6:3:21
@test (@range (15:-3:-1) ± 6) === -6:3:21
let I = CartesianIndex(7,8)
@test stretch(I, 2) === CartesianIndices((5:9, 6:10))
@test (@range I ± 2) === CartesianIndices((5:9, 6:10))
@test stretch(I, (2,3)) === CartesianIndices((5:9, 5:11))
@test (@range I ± (2,3)) === CartesianIndices((5:9, 5:11))
@test stretch(I, CartesianIndex(2,3)) === CartesianIndices((5:9, 5:11))
@test (@range I ± CartesianIndex(2,3)) === CartesianIndices((5:9, 5:11))
end
let R = CartesianIndices((5:8, -1:4))
@test stretch(R, 2) === CartesianIndices((3:10, -3:6))
@test (@range R ± 2) === CartesianIndices((3:10, -3:6))
@test stretch(R, (2,3)) === CartesianIndices((3:10, -4:7))
@test (@range R ± (2,3)) === CartesianIndices((3:10, -4:7))
@test stretch(R, CartesianIndex(2,3)) === CartesianIndices((3:10, -4:7))
@test (@range R ± CartesianIndex(2,3)) === CartesianIndices((3:10, -4:7))
end
# Shrinking.
@test shrink(7, -11) === -4:18
@test shrink(Int16(7), -Int16(11)) === -4:18
@test shrink(OneTo(6), 2) === 3:4
@test shrink(OneTo{Int16}(6), Int16(2)) === 3:4
@test shrink(7, -3) === 4:10
@test shrink(7, -Int16(3)) === 4:10
@test_throws ArgumentError shrink(1:3:9, 2)
@test_throws ArgumentError @range (1:3:9) ∓ 2
@test shrink(-1:3:15, 6) === 5:3:8
@test (@range (-1:3:15) ∓ 6) === 5:3:8
@test shrink(15:-3:-1, 6) === 6:3:9
@test (@range (15:-3:-1) ∓ 6) === 6:3:9
let R = CartesianIndices((5:11, -1:6))
@test shrink(R, 2) === CartesianIndices((7:9, 1:4))
@test (@range R ∓ 2) === CartesianIndices((7:9, 1:4))
@test shrink(R, (2,3)) === CartesianIndices((7:9, 2:3))
@test (@range R ∓ (2,3)) === CartesianIndices((7:9, 2:3))
@test shrink(R, CartesianIndex(2,3)) === CartesianIndices((7:9, 2:3))
@test (@range R ∓ CartesianIndex(2,3)) === CartesianIndices((7:9, 2:3))
end
# Shift CartesianIndices by CartesianIndex.
@test (@range CartesianIndices((2:3, -1:5)) + CartesianIndex(4,-7)) ===
CartesianIndices((6:7, -8:-2))
@test (@range CartesianIndices((2:3, -1:5)) - CartesianIndex(4,-7)) ===
CartesianIndices((-2:-1, 6:12))
@test (@range CartesianIndex(4,-7) + CartesianIndices((2:3, -1:5))) ===
CartesianIndices((6:7, -8:-2))
@test (@range CartesianIndices((2:3, -1:5)) - CartesianIndex(4,-7)) ===
CartesianIndices((-2:-1, 6:12))
@test (@range OneTo(5)) === OneTo(5)
@test (@reverse_range OneTo(5)) === 5:-1:1
@test (@range 1:5) === 1:5
@test (@reverse_range 1:5) === 5:-1:1
@test (@range 5:-1:1) === 1:1:5
@test (@reverse_range 5:-1:1) === 5:-1:1
@test (@range -7:2:6) === -7:2:5
@test (@reverse_range -7:2:6) === 5:-2:-7
@test (@range 5:-2:-8) === -7:2:5
@test (@reverse_range 5:-2:-8) === 5:-2:-7
# Shift CartesianIndices by CartesianIndex (reversed).
if CARTESIAN_INDICES_MAY_HAVE_NON_UNIT_RANGES
@test (@reverse_range CartesianIndices((2:3, -1:5)) + CartesianIndex(4,-7)) ===
CartesianIndices((7:-1:6, -2:-1:-8))
@test (@reverse_range CartesianIndices((2:3, -1:5)) - CartesianIndex(4,-7)) ===
CartesianIndices((-1:-1:-2, 12:-1:6))
@test (@reverse_range CartesianIndex(4,-7) + CartesianIndices((2:3, -1:5))) ===
CartesianIndices((7:-1:6, -2:-1:-8))
@test (@reverse_range CartesianIndices((2:3, -1:5)) - CartesianIndex(4,-7)) ===
CartesianIndices((-1:-1:-2, 12:-1:6))
end
end
end # module
| EasyRanges | https://github.com/emmt/EasyRanges.jl.git |
|
[
"MIT"
] | 0.1.2 | 504869786fe9e38cf52cbbac43e68f2f102e13e3 | docs | 13226 | # EasyRanges: range expressions made easier for Julia
[](https://github.com/emmt/EasyRanges.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://ci.appveyor.com/project/emmt/EasyRanges-jl)
[](https://codecov.io/gh/emmt/EasyRanges.jl)
`EasyRanges` is a small Julia package dedicated at making life easier with
integer or Cartesian indices and ranges. This package exports macros `@range`
and `@reverse_range` which take an expression with extended syntax rules (see
below) and rewrite it to produce an `Int`-valued *index range* which may be a
step range or an instance of `CartesianIndices`. These two macros differ in the
step sign of the result: `@range` always yield ranges with non-decreasing
indices, while `@reverse_range` always yield ranges with non-increasing
indices.
Compared to range expressions with broadcast operators (`.+`, `.-`, etc.) that
are implemented by Julia, the `EasyRanges` package offers a number of
advantages:
- The code is more expressive and an extended syntax is supported.
- Computing the resulting range can be much faster and involves at most `O(d)`
storage with `d` the number of array dimensions. Note: Julia ≥ 1.9 improves
on this by being able to return an iterator, yet expressions such as `A ∩ (I
.- B)`, with `A` and `B` Cartesian ranges and `I` a Cartesian index, yield an
array of Cartesian indices.
- The `@range` macro always yields non-decreasing indices which is most
favorable for the efficiency of **loop vectorization**, for example with the
`@simd` macro of Julia or with with the `@turbo` (formerly `@avx`) macro
provided by
[`LoopVectorization`](https://github.com/JuliaSIMD/LoopVectorization.jl.git).
## Usage
```julia
using EasyRanges
```
brings two macros, `@range` and `@reverse_range`, into scope. These macros can
be used as:
```julia
@range expr
@reverse_range expr
```
to evaluate expression `expr` with special rules (see below) where integers,
Cartesian indices, and ranges of integers or of Cartesian indices are treated
specifically:
- integers are converted to `Int`, ranges to `Int`-valued ranges, and tuples of
integers to tuples of `Int`;
- arithmetic expressions only involving indices and ranges yield lightweight
and efficient ranges (of integers or of Cartesian indices);
- ranges produced by `@range` (resp. `@reverse_range`) always have positive
(resp. negative) steps;
- operators `+` and `-` can be used to [*shift*](#shift-operations) index
ranges;
- operator `∩` and method `intersect` yield the [intersection](#intersecting)
of ranges with ranges, of ranges with indices, or of indices with indices;
- operator `±` can be used to [*stretch*](#stretching) ranges or to produce
centered ranges;
- operator `∓` can be used to [*shrink*](#shrinking) ranges.
As shown in [*A working example*](#a-working-example) below, these rules are
useful for writing readable ranges in `for` loops without sacrificing
efficiency.
### Definitions
In `EasyRanges`, if *indices* are integers, *ranges* means ranges of integers
(of super-type `OrdinalRange{Int,Int}`); if *indices* are Cartesian indices,
*ranges* means ranges of Cartesian indices (of super-type `CartesianIndices`).
### Shift operations
In `@range` and `@reverse_range` expressions, an index range `R` can be shifted
with the operators `+` and `-` by an amount specified by an index `I`:
```julia
@range R + I -> S # J ∈ S is equivalent to J - I ∈ R
@range R - I -> S # J ∈ S is equivalent to J + I ∈ R
@range I + R -> S # J ∈ S is equivalent to J - I ∈ R
@range I - R -> S # J ∈ S is equivalent to I - J ∈ R
```
Integer-valued ranges can be shifted by an integer offset:
```julia
@range (3:6) + 1 -> 4:7 # (3:6) .+ 1 -> 4:7
@range 1 + (3:6) -> 4:7 # 1 .+ (3:6) -> 4:7
@range (2:4:10) + 1 -> 3:4:11 # (2:4:10) .+ 1 -> 3:4:11
@range (3:6) - 1 -> 2:5 # (3:6) .- 1 -> 2:5
@range 1 - (3:6) -> -5:-2 # 1 .- (3:6) -> -2:-1:-5
```
This is like using the broadcasting operators `.+` and `.-` except that the
result is an `Int`-valued range and that the step sign is kept positive (as in
the last above example).
The `@reverse_range` macro yields ranges with negative steps:
```julia
@reverse_range (3:6) + 1 -> 7:-1:4
@reverse_range 1 + (3:6) -> 7:-1:4
@reverse_range (3:6) - 1 -> 5:-1:2
@reverse_range 1 - (3:6) -> -2:-1:-5
```
Cartesian ranges can be shifted by a Cartesian index (without penalties on the
execution time and, usually, no extra allocations):
```julia
@range CartesianIndices((2:6, -1:2)) + CartesianIndex(1,3)
# -> CartesianIndices((3:7, 2:5))
@range CartesianIndex(1,3) + CartesianIndices((2:6, -1:2))
# -> CartesianIndices((3:7, 2:5))
@range CartesianIndices((2:6, -1:2)) - CartesianIndex(1,3)
# -> CartesianIndices((1:5, -4:-1))
@range CartesianIndex(1,3) - CartesianIndices((2:6, -1:2))
# -> CartesianIndices((-5:-1, 1:4))
```
This is similar to the broadcasting operators `.+` and `.-` except that a
lightweight instance of `CartesianIndices` with positive increment is always
produced.
### Intersecting
In `@range` and `@reverse_range` expressions, the operator `∩` (obtained by
typing `\cap` and pressing the `[tab]` key at the REPL) and the method
`intersect` yield the intersection of ranges with ranges, of ranges with
indices, or of indices with indices.
The intersection of indices, say `I` and `J`, yields a range `R` (empty if the
integers are different):
```julia
@range I ∩ J -> R # R = {I} if I == J, R = {} else
```
Examples:
```julia
@range 3 ∩ 3 -> 3:3
@range 3 ∩ 2 -> 1:0 # empty range
@range CartesianIndex(3,4) ∩ CartesianIndex(3,4) -> CartesianIndices((3:3,4:4))
```
The intersection of an index range `R` and an index `I` yields an index range
`S` that is either the singleton `{I}` (if `I` belongs to `R`) or empty (if `I`
does not belong to `R`):
```julia
@range R ∩ I -> S # S = {I} if I ∈ R, S = {} else
@range I ∩ R -> S # idem
```
Examples:
```julia
@range (2:6) ∩ 3 -> 3:3 # a singleton range
@range 1 ∩ (2:6) -> 1:0 # an empty range
@range (2:6) ∩ (3:7) -> 3:6 # intersection of ranges
@range CartesianIndices((2:4, 5:9)) ∩ CartesianIndex(3,7)
-> CartesianIndices((3:3, 7:7))
```
These syntaxes are already supported by Julia, but the `@range` macro
guarantees to return an `Int`-valued range with a forward (positive) step.
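For instance (the base-Julia result shown in the comment is indicative and may
vary across Julia versions):

```julia
intersect(14:-3:2, 1:2:12)  # base Julia typically keeps a backward step: 11:-6:5
@range (14:-3:2) ∩ (1:2:12) -> 5:6:11 # always `Int`-valued with a forward step
```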
### Stretching
In `@range` and `@reverse_range` expressions, the operator `±` (obtained by
typing `\pm` and pressing the `[tab]` key at the REPL) can be used to
**stretch** ranges or to produce **centered ranges**.
The expression `R ± I` yields the index range `R` stretched by an amount
specified by index `I`. Assuming `R` is a unit range:
```julia
@range R ± I -> (first(R) - I):(last(R) + I)
```
where, if `R` is a range of integers, `I` is an integer, and if `R` is an
`N`-dimensional Cartesian range, `I` is an integer, an `N`-tuple of integers,
or an `N`-dimensional Cartesian index. Not
shown in the above expression, the range step is preserved by the operation
(except that the result has a positive step).
The expression `I ± ΔI` with `I` an index and `ΔI` an index offset yields an
index range centered at `I`:
```julia
@range I ± ΔI -> (I - ΔI):(I + ΔI)
```
There is no sign correction and the range may be empty. If `I` and `ΔI` are two
integers, `I ± ΔI` is a range of integers. If `I` is an `N`-dimensional
Cartesian index, then `I ± ΔI` is a range of Cartesian indices and `ΔI` can be
an integer, an `N`-tuple of integers, or an `N`-dimensional Cartesian index.
Specifying `ΔI` as a single integer for an `N`-dimensional Cartesian index `I`
is identical to specifying the same amount of stretching for each dimension.
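For instance, following the definitions above:

```julia
@range (2:6) ± 2 -> 0:8
@range CartesianIndex(7,8) ± 2 -> CartesianIndices((5:9, 6:10))
@range CartesianIndex(7,8) ± (2,3) -> CartesianIndices((5:9, 5:11))
```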
### Shrinking
In `@range` and `@reverse_range` expressions, the operator `∓` (obtained by
typing `\mp` and pressing the `[tab]` key at the REPL) can be used to
**shrink** ranges.
The expression `R ∓ I` yields the same result as `@range R ± (-I)`, that is the
index range `R` shrunk by an amount specified by index `I`:
```julia
@range R ∓ I -> (first(R) + I):(last(R) - I)
```
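For instance, following the definition above:

```julia
@range (0:10) ∓ 2 -> 2:8
@range CartesianIndices((5:11, -1:6)) ∓ 2 -> CartesianIndices((7:9, 1:4))
```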
## Installation
The `EasyRanges` package is an official Julia package and can be installed as
follows:
```julia
using Pkg
pkg"add EasyRanges"
```
## A working example
`EasyRanges` may be very useful to write readable expressions in ranges used by
`for` loops. For instance, suppose that you want to compute a **discrete
correlation** of `A` by `B` as follows:
$$
C[i] = \sum_{j} A[j] B[j-i]
$$
and for all valid indices `i` and `j`. Assuming `A`, `B` and `C` are abstract
vectors, the Julia equivalent code is:
```julia
for i ∈ eachindex(C)
s = zero(T)
j_first = max(firstindex(A), firstindex(B) + i)
j_last = min(lastindex(A), lastindex(B) + i)
for j ∈ j_first:j_last
s += A[j]*B[j-i]
end
C[i] = s
end
```
where `T` is a suitable type, say `T = promote_type(eltype(A), eltype(B))`. The
above expressions of `j_first` and `j_last` are to ensure that `A[j]` and
`B[j-i]` are in bounds. The same code for multidimensional arrays writes:
```julia
for i ∈ CartesianIndices(C)
s = zero(T)
j_first = max(first(CartesianIndices(A)),
first(CartesianIndices(B)) + i)
j_last = min(last(CartesianIndices(A)),
last(CartesianIndices(B)) + i)
for j ∈ j_first:j_last
s += A[j]*B[j-i]
end
C[i] = s
end
```
now `i` and `j` are multidimensional Cartesian indices and Julia already helps
a lot by making such a code applicable whatever the number of dimensions. Note
that the syntax `j_first:j_last` is supported for Cartesian indices since Julia
1.1. There is more such syntactic sugar and using the broadcasting operator
`.+` and the operator `∩` (a shortcut for the function `intersect`), the code
can be rewritten as:
```julia
for i ∈ CartesianIndices(C)
s = zero(T)
for j ∈ CartesianIndices(A) ∩ (CartesianIndices(B) .+ i)
s += A[j]*B[j-i]
end
C[i] = s
end
```
which is not less efficient and yet much more readable. Indeed, the statement
```julia
for j ∈ CartesianIndices(A) ∩ (CartesianIndices(B) .+ i)
```
makes it clear that the loop is for all indices `j` such that `j ∈
CartesianIndices(A)` and `j - i ∈ CartesianIndices(B)` which is required to
have `A[j]` and `B[j-i]` in bounds. The same principles can be applied to the
uni-dimensional code:
```julia
for i ∈ eachindex(C)
s = zero(T)
for j ∈ eachindex(A) ∩ (eachindex(B) .+ i)
s += A[j]*B[j-i]
end
C[i] = s
end
```
Now suppose that you want to compute the **discrete convolution** instead:
$$
C[i] = \sum_{j} A[j] B[i-j]
$$
Then, the code for multi-dimensional arrays writes:
```julia
for i ∈ CartesianIndices(C)
s = zero(T)
for j ∈ CartesianIndices(A) ∩ (i .- CartesianIndices(B))
s += A[j]*B[i-j]
end
C[i] = s
end
```
because you want to have `j ∈ CartesianIndices(A)` and `i - j ∈
CartesianIndices(B)`, the latter being equivalent to `j ∈ i -
CartesianIndices(B)`.
This simple change however results in **a dramatic slowdown** because the
expression `i .- CartesianIndices(B)` yields an array of Cartesian indices
while the expression `CartesianIndices(B) .- i` yields an instance of
`CartesianIndices`. As an example, the discrete convolution of a 32×32 array by
a 8×8 array in single precision floating-point takes 30.3 ms or 88.5 ms on my
laptop (Intel Core i7-5500U CPU at 2.40GHz) depending on the order of the
operands and 40Mb of memory compared to 5.6 μs or 35.8 µs and no additional
memory for a discrete correlation (all with `@inbounds` and `@simd` of course).
Hence a slowdown by a factor of 5410 or 2570 for the same number of
floating-point operations.
Using the `@range` macro of `EasyRanges`, the discrete correlation and discrete
convolution write:
```julia
# Discrete correlation.
for i ∈ CartesianIndices(C)
s = zero(T)
for j ∈ @range CartesianIndices(A) ∩ (i + CartesianIndices(B))
s += A[j]*B[j-i]
end
C[i] = s
end
# Discrete convolution.
for i ∈ CartesianIndices(C)
s = zero(T)
for j ∈ @range CartesianIndices(A) ∩ (i - CartesianIndices(B))
s += A[j]*B[i-j]
end
C[i] = s
end
```
which do not require the broadcasting operators `.+` and `.-` and which do not
have the aforementioned issue. Using the macros `@range` and `@reverse_range`
have other advantages:
- The result is guaranteed to be `Int`-valued (needed for efficient indexing).
- The *step*, that is the increment between consecutive indices, in the result
has a given direction: `@range` always yields a non-negative step (which is
favorable for loop vectorization), while `@reverse_range` always yields a
non-positive step.
- The syntax of range expressions is simplified and extended for other
operators (like `±` for stretching or `∓` for shrinking) that are not
available in the base Julia. This syntax can be extended as the package is
developed without disturbing other packages (i.e., no type-piracy).
| EasyRanges | https://github.com/emmt/EasyRanges.jl.git |
|
[
"MIT"
] | 0.1.0 | 4fd0bc928ee1f2759527e1e3115e679ed9a3d6b8 | code | 597 | ### ScRNAseq.jl
###
### A Julia package for scRNA-seq exploratory data analysis
##
### This file is NOT YET a part of BioJulia.
### License is MIT: https://github.com/BioJulia/BioSequences.jl/blob/master/LICENSE.md
module ScRNAseq
export
Fileio,
Qualitycontrol,
Transformation,
Embedding
include("helper.jl")
include("fileio/fileio.jl")
include("qualitycontrol/qualitycontrol.jl")
include("transformation/transformation.jl")
include("embedding/embedding.jl")
using .Fileio
using .Qualitycontrol
using .Transformation
using .Embedding
end # module ScRNAseq | ScRNAseq | https://github.com/jamesjcai/ScRNAseq.jl.git |
|
[
"MIT"
] | 0.1.0 | 4fd0bc928ee1f2759527e1e3115e679ed9a3d6b8 | code | 508 | # using TableReader
using Statistics, LinearAlgebra # for mean/std and norm used below

rescale(A; dims=1) = (A .- mean(A, dims=dims)) ./ max.(std(A, dims=dims), eps())
# vecnorm(x) = x./norm.(x[:,i] for i in 1:size(x,2))'
vecnorm(x::AbstractMatrix) = norm.(x[:,i] for i in 1:size(x,2))
function normc!(x)
for i in 1:size(x,2)
x[:,i]=x[:,i]./norm(x[:,i])
end
end
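# Illustrative usage of the helpers above (assumes the imports in this file):
#   A = rand(5, 3)
#   rescale(A)  # column-standardize: zero mean, (guarded) unit std
#   normc!(A)   # in place: scale each column to unit Euclidean norm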
#=
function [A,G,H] = pythagoreanMeans(list)
A = mean(list); % arithmetic mean
G = exp(mean(log(list))); % geometric mean
H = 1./mean(1./list); % harmonic mean
end
=#
| ScRNAseq | https://github.com/jamesjcai/ScRNAseq.jl.git |
|
[
"MIT"
] | 0.1.0 | 4fd0bc928ee1f2759527e1e3115e679ed9a3d6b8 | code | 99 | module Embedding
export
umap,
tsne
include("umap.jl")
include("tsne.jl")
end | ScRNAseq | https://github.com/jamesjcai/ScRNAseq.jl.git |
|
[
"MIT"
] | 0.1.0 | 4fd0bc928ee1f2759527e1e3115e679ed9a3d6b8 | code | 121 | import TSne, Plots
function tsne(X)
X = convert(Array{Float64,2}, X);
Y = TSne.tsne(X, 3);
return Y
end
| ScRNAseq | https://github.com/jamesjcai/ScRNAseq.jl.git |
|
[
"MIT"
] | 0.1.0 | 4fd0bc928ee1f2759527e1e3115e679ed9a3d6b8 | code | 498 | import UMAP, Plots
function umap(X)
X2 = convert(Array{Float64,2}, X)
Y = UMAP.umap(X2, 3) # ,2;n_neighbors=5);
f2 = Plots.plot(Y[1, :], Y[2, :], seriestype = :scatter)
Y = Y';
# theplot = scatter(Y[:,1], Y[:,2], marker=(2,2,:auto,stroke(0))) # , color=Int.(allabels[1:size(Y,1)]))
#=
f = scatter3d(
Y[:, 1],
Y[:, 2],
Y[:, 3],
marker = (2, 2, :auto, stroke(0)),
color = Int.(sum(X, dims = 1)),
)
=#
return Y
end
| ScRNAseq | https://github.com/jamesjcai/ScRNAseq.jl.git |
|
[
"MIT"
] | 0.1.0 | 4fd0bc928ee1f2759527e1e3115e679ed9a3d6b8 | code | 943 | module Fileio
using DelimitedFiles, MatrixMarket, UnicodePlots, MAT, CSV, DataFrames
export readmm,
readtx,
readgl,
showx
function readmm(filename)
# read MatrixMarket file
X=mmread(filename);
end
function readcsv(filename)
# read CSV file with header and row name
df = CSV.File(filename; datarow=2) |> DataFrame!
X=convert(Matrix, df[:,2:end])
genelist=df[:,1]
return X,genelist
end
function readmt(filename)
# read Matlab Mat file
file=matopen(filename)
X=read(file,"X")
genelist=read(file,"genelist")
close(file)
X = convert(Array{Float64,2}, X)
return X,genelist
end
function readtx(filename)
# read DLM text file
X=readdlm(filename,',',Int16)
end
function readgl(filename::String,colidx::Integer=1)
# read genelist
genelist=readdlm(filename,'\t',String)
genelist=vec(genelist[:,colidx])
end
function showx(X)
spy(X)
end
end | ScRNAseq | https://github.com/jamesjcai/ScRNAseq.jl.git |
|
[
"MIT"
] | 0.1.0 | 4fd0bc928ee1f2759527e1e3115e679ed9a3d6b8 | code | 178 |
# using MarketMatrix
# https://imgur.com/a/y3C0Vd2
println(pwd())
cd(dirname(@__FILE__))
println(pwd())
using MatrixMarket
A=mmread("Ydf_matrix.mtx")
using UnicodePlots
spy(A) | ScRNAseq | https://github.com/jamesjcai/ScRNAseq.jl.git |
|
[
"MIT"
] | 0.1.0 | 4fd0bc928ee1f2759527e1e3115e679ed9a3d6b8 | code | 2472 | using LinearAlgebra, Statistics, MAT, Arpack
# cd("C:\\Users\\jcai.AUTH\\Documents\\GitHub\\julia_test\\pcnet")
# file=matopen("testdata.mat")
# X=read(file,"X")
# A0=read(file,"A")
# close(file)
# https://discourse.julialang.org/t/how-to-get-the-principal-components-and-variances-from-multivariatestats/15843/4
"""
pca(data)
perform PCA using SVD
inputs:
- data: M x N matrix of input data. (M dimensions, N trials)
outputs:
- PC: each column is a principle component
- V: M x 1 matrix of variances
"""
function pca(data::Array{T,2}) where T
X = data .- mean(data, dims=2)
Y = X' ./ sqrt(T(size(X,2)-1))
U,S,PC = svd(Y)
S = diagm(0=>S)
V = S .* S
# find the least variance vector
indexList = sortperm(diag(V); rev=true)
# PCs = map(x->PC[:,x], indexList)
return PC, diag(V)[indexList]
end
X=randn(Float64, (20,4))
pc0,d0=pca(collect(X'))
using MultivariateStats
p=fit(PCA,X')
pc1=p.proj
d1=p.prinvars
function pca2(X; k::Int=3)
X=X.-mean(X,dims=1)
Σ = X'X./(size(X,1)-1) # Covariance natrix
# D,V = eigen(Σ,sortby=-) # Factorise into Σ = U * diagm(S) * V'
D,V = eigen(Σ,sortby=x -> -abs(x)) # Factorise into Σ = U * diagm(S) * V'
# sortby = x -> -abs(x)
Xrot = X*V # Rotate onto the basis defined by U
# pvar = sum(D[1:k]) / sum(D) # Percentage of variance retained with top k vectors
# X̃ = Xrot[:,1:k] # Keep top k vectors
return V, D
end
pc2,d2=pca2(X)
function pca3(X, k::Int=3)
X=X.-mean(X,dims=1)
X=X./sqrt(size(X,1)-1)
# var(X,dims=1)
F = svd(X) # Factorise into Σ = U * diagm(S) * V'
# Xrot = X*F.V # Rotate onto the basis defined by U
# pvar = sum(F.S[1:k]) / sum(F.S) # Percentage of variance retained with top k vectors
# X̃ = Xrot[:,1:k] # Keep top k vectors
return F.V, F.S.*F.S
end
pc3,d3=pca3(X)
function pca4(X, k::Int=3)
X=X.-mean(X,dims=1)
X=X./sqrt(size(X,1)-1)
# var(X,dims=1)
F = svds(X;nsv=3)[1] # Factorise into Σ = U * diagm(S) * V'
# Xrot = X*F.V # Rotate onto the basis defined by U
# pvar = sum(F.S[1:k]) / sum(F.S) # Percentage of variance retained with top k vectors
# X̃ = Xrot[:,1:k] # Keep top k vectors
return F.V, F.S.*F.S
end
pc4,d4=pca4(X)
[d0 d1 d2 d3 d4] # compare the variance estimates from each implementation
#= principal components from each implementation, for manual inspection:
pc0
pc1
pc2
pc3
=#
| ScRNAseq | https://github.com/jamesjcai/ScRNAseq.jl.git |
|
[
"MIT"
] | 0.1.0 | 4fd0bc928ee1f2759527e1e3115e679ed9a3d6b8 | code | 2172 |
using LinearAlgebra, Statistics, MAT, Arpack, MultivariateStats, Random
# p=fit(PCA,X')
# cd("C:\\Users\\jcai.AUTH\\Documents\\GitHub\\julia_test\\pcnet")
# file=matopen("testdata.mat")
# X=read(file,"X")
# A0=read(file,"A")
# close(file)
rng = MersenneTwister(1234);
X=randn(rng,Float64,(200,300));
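# Each pcnetworkN variant below builds a PCA-regression network: every column
# of X is regressed on the top-3 principal components of the remaining
# columns, and the regression coefficients fill the corresponding row of A.
# The variants differ only in how the components are computed (truncated
# svds, dense svd, eigs on X'X, or MultivariateStats' fit(PCA, ...)).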
function pcnetwork1(X)
n=size(X,2)
A=1.0 .-Matrix(I,n,n)
for k in 1:n
y=X[:,k]
𝒳=X[:,1:end.≠k]
F=svds(𝒳,nsv=3)[1]
ϕ=F.V
s=𝒳*ϕ
s ./= (norm.(s[:,i] for i=1:size(s,2)).^2)'
b=sum(y.*s,dims=1)
𝒷=ϕ*b'
A[k,A[k,:].==1.0]=𝒷
end
return A
end
function pcnetwork2(X)
n=size(X,2)
A=1.0 .-Matrix(I,n,n)
for k in 1:n
y=X[:,k]
𝒳=X[:,1:end.≠k]
F=svd(𝒳)
ϕ=F.V[:,1:3]
s=𝒳*ϕ
s ./=(norm.(s[:,i] for i=1:size(s,2)).^2)'
b=sum(y.*s,dims=1)
𝒷=ϕ*b'
A[k,A[k,:].==1.0]=𝒷
end
return A
end
function pcnetwork3(X)
n=size(X,2)
A=1.0 .-Matrix(I,n,n)
for k in 1:n
y=X[:,k]
𝒳=X[:,1:end.≠k]
# _,v=eigen(𝒳'𝒳,sortby=-)
# v=eigvecs(𝒳'𝒳,sortby=-)
# v=v[:,1:3]
_,ϕ=eigs(𝒳'𝒳,nev=3,which=:LM)
s=𝒳*ϕ
s ./=(norm.(s[:,i] for i=1:size(s,2)).^2)'
b=sum(y.*s,dims=1)
𝒷=ϕ*b'
A[k,A[k,:].==1.0]=𝒷
end
return A
end
function pcnetwork4(X)
n=size(X,2)
A=1.0 .-Matrix(I,n,n)
for k in 1:n
y=X[:,k]
𝒳=X[:,1:end.≠k]
p=fit(PCA,𝒳')
v=p.proj
v=v[:,1:3]
s=𝒳*v
s=s./(norm.(s[:,i] for i=1:size(s,2)).^2)'
b=sum(y.*s,dims=1)
𝒷=v*b'
A[k,A[k,:].==1.0]=𝒷
end
return A
end
function pcnetwork5(X)
# http://hua-zhou.github.io/teaching/biostatm280-2017spring/slides/16-eigsvd/eigsvd.html
n=size(X,2)
A=1.0 .-Matrix(I,n,n)
for k in 1:n
y=X[:,k]
𝒳=X[:,1:end.≠k]
# _,v=eigen(𝒳'𝒳,sortby=-)
# v=eigvecs(𝒳'𝒳,sortby=-)
# v=v[:,1:3]
U,S,V=svd(𝒳)
ϕ=V[:,1:3]
b=V*inv(diagm(S))*U'*y
𝒷=ϕ*b'
A[k,A[k,:].==1.0]=𝒷
end
return A
end
@time A1=pcnetwork1(X);
@time A2=pcnetwork2(X);
@time A3=pcnetwork3(X);
# @time A5=pcnetwork5(X);
# @time A4=pcnetwork4(X);
A1≈A2≈A3
# A1≈A5 | ScRNAseq | https://github.com/jamesjcai/ScRNAseq.jl.git |
|
[
"MIT"
] | 0.1.0 | 4fd0bc928ee1f2759527e1e3115e679ed9a3d6b8 | code | 618 | println("hello")
using DelimitedFiles
a=readdlm("GSM3204305_P_N_Expr_999cells.csv",',');
b=a[2:end,2:end]
using CSV, DataFrames, Statistics
# CSV.File("GSM3204305_P_N_Expr_999cells.csv"; datarow=2)
df = CSV.File("GSM3204305_P_N_Expr_999cells.csv"; datarow=2) |> DataFrame!
X=convert(Matrix, df[:,2:end])
typeof(X)
# df = DataFrame(x = rand(3),w=rand(3))
# dv = @data([NA, 3, 2, 5, 4])
# mean(dv)
# b=readtable("GSM3204305_P_N_Expr_999cells.csv")
libsize=sum(X,dims=1)
libsize[libsize.>20000]
filter(x -> x > 20000, libsize)
a = [1 2; 3 4]
a[a .== 1]
a[[false true; false true]]
X[:, vec(libsize .> 20000)] # keep cells with library size > 20000
| ScRNAseq | https://github.com/jamesjcai/ScRNAseq.jl.git |
|
[
"MIT"
] | 0.1.0 | 4fd0bc928ee1f2759527e1e3115e679ed9a3d6b8 | code | 722 | # Pkg.add("TSne")
using Statistics, DelimitedFiles
a = readdlm("GSM3204305_P_N_Expr_999cells.csv", ',');
X = a[2:end, 2:end]
# X=a[2:500,2:500]
rescale(A; dims = 1) =
(A .- mean(A, dims = dims)) ./ max.(std(A, dims = dims), eps())
# using TSne
# Y = tsne(X', 2, 50, 1000, 20.0); # samples in row
using UMAP, Plots
X2 = convert(Array{Float64,2}, X)
Y = umap(X2, 3) # ,2;n_neighbors=5);
f2 = plot(Y[1, :], Y[2, :], seriestype = :scatter)
Y = Y';
# theplot = scatter(Y[:,1], Y[:,2], marker=(2,2,:auto,stroke(0))) # , color=Int.(allabels[1:size(Y,1)]))
f = scatter3d(
Y[:, 1],
Y[:, 2],
Y[:, 3],
marker = (2, 2, :auto, stroke(0)),
color = Int.(sum(X, dims = 1)),
)
# Plots.pdf(f, "myplot.pdf")
| ScRNAseq | https://github.com/jamesjcai/ScRNAseq.jl.git |
|
[
"MIT"
] | 0.1.0 | 4fd0bc928ee1f2759527e1e3115e679ed9a3d6b8 | code | 512 | #using Pkg
#Pkg.add("DelimitedFiles")
#Pkg.add("MAT")
#Pkg.add("Arpack")
using Statistics, DelimitedFiles, LinearAlgebra, Statistics, MAT, Arpack, Pkg
cd("E:\\GitHub\\julia_test\\scrnaseq_code")
file=matopen("s1131_cr.mat")
X=read(file,"X");
s=read(file,"t_sne");
close(file)
cd("..")
X = convert(Array{Float64,2}, X)
# Pkg.activate("E:\\GitHub\\julia_test\\pcnet\\pcnet.jl")
using pcrnet
#@time A1=pcrnet.pcnetwork1(rand(200,300));
#@time A3=pcrnet.pcnetwork3(rand(200,300));
@time A=pcrnet.pcnetwork3(X');
| ScRNAseq | https://github.com/jamesjcai/ScRNAseq.jl.git |
|
[
"MIT"
] | 0.1.0 | 4fd0bc928ee1f2759527e1e3115e679ed9a3d6b8 | code | 154 |
module Normalization
export
norm_libsize
# using StatsBase
function norm_libsize(X)
lbsz=sum(X,dims=1)
X=(X./lbsz)*1e4;
return X
end
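# Illustrative check: after normalization every column (cell) sums to 1e4
#   X = rand(1:10, 100, 5)
#   all(sum(norm_libsize(X), dims=1) .≈ 1e4)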
end
| ScRNAseq | https://github.com/jamesjcai/ScRNAseq.jl.git |
|
[
"MIT"
] | 0.1.0 | 4fd0bc928ee1f2759527e1e3115e679ed9a3d6b8 | code | 599 | module Qualitycontrol
using Statistics, SparseArrays
export selectg,
scstats,
emptyrate
function ismtgene(genelist)
    # flag mitochondrial genes by the conventional "MT-" name prefix
    startswith.(uppercase.(genelist),"MT-")
end
function selectg(X,genelist)
    # keep genes (rows) detected in more than 5% of cells (columns)
    nc=size(X,2);
    i=vec(sum(!iszero,X,dims=2)./nc.>0.05)
    X=X[i,:];
    genelist=genelist[i];
    return X,genelist
end
function scstats(X)
logmean=log10.(mean(X,dims=2));
logvar=log10.(var(X,dims=2));
dropoutrate=mean(X.==0,dims=2);
return logmean,logvar,dropoutrate
end
function emptyrate(X)
    # fraction of zero entries in the count matrix
    count(iszero,X)/length(X)
end
end | ScRNAseq | https://github.com/jamesjcai/ScRNAseq.jl.git |
|
[
"MIT"
] | 0.1.0 | 4fd0bc928ee1f2759527e1e3115e679ed9a3d6b8 | code | 205 | function pearsonresiduals(X)
    # expected counts under a rank-1 (independence) null model
    u=(sum(X,dims=2)*sum(X,dims=1))./sum(X);
    # standard deviation under a negative binomial model with theta = 100
    s=sqrt.(u+(u.^2)./100);
    X=(X-u)./s;
    # clip residuals at +/- sqrt(n), with n the number of cells
    n=size(X,2);
    sn=sqrt(n);
    X[X.>sn].=sn;
    X[X.<-sn].=-sn;
    return X
end
|
[
"MIT"
] | 0.1.0 | 4fd0bc928ee1f2759527e1e3115e679ed9a3d6b8 | code | 95 | module Transformation
export pearsonresiduals
include("pearsonresiduals.jl")
end | ScRNAseq | https://github.com/jamesjcai/ScRNAseq.jl.git |
|
[
"MIT"
] | 0.1.0 | 4fd0bc928ee1f2759527e1e3115e679ed9a3d6b8 | code | 414 | using ScRNAseq # obviously the tests use the ScTenifoldNet module...
using DelimitedFiles
cd(dirname(@__FILE__))
X=readdlm("X.txt",',',Int16)
genelist=vec(readdlm("genelist.txt",String))
X,genelist=ScRNAseq.Qualitycontrol.selectg(X,genelist)
X1=ScRNAseq.Transformation.pearsonresiduals(X)
Y=ScRNAseq.Embedding.umap(X1)
# Y2=ScRNAseq.Embedding.tsne(X)
using Plots
scatter(Y[:,1],Y[:,2])
| ScRNAseq | https://github.com/jamesjcai/ScRNAseq.jl.git |
|
[
"MIT"
] | 0.1.0 | 4fd0bc928ee1f2759527e1e3115e679ed9a3d6b8 | code | 402 | push!(LOAD_PATH,"E:/GitHub/ScRNAseq.jl/src/");
using ScRNAseq # obviously the tests use the ScTenifoldNet module...
using Test # and the Base.Test module...
tests = ["code_test1"] # the test file names are stored as strings...
for t in tests
include("$(t).jl") # ... so that they can be evaluated in a loop
end | ScRNAseq | https://github.com/jamesjcai/ScRNAseq.jl.git |
|
[
"MIT"
] | 0.1.0 | 4fd0bc928ee1f2759527e1e3115e679ed9a3d6b8 | code | 2493 | using DelimitedFiles
cd(dirname(@__FILE__))
X=readdlm("X.txt",',',Int16)
genelist=vec(readdlm("genelist.txt",String))
using Statistics
glogmean=log10.(mean(X,dims=2));
glogvar=log10.(var(X,dims=2));
dropoutrate=mean(X.==0,dims=2);
using SparseArrays
nnz(sparse(X))
count(!iszero,X)
using Plots
Plots.scatter(glogmean,glogvar)
Plots.scatter(glogmean,1 .-dropoutrate)
using Distributions
# fit_mle(NegativeBinomial,X[3,:].+0.0)
fit_mle(Normal,X[3,:].+0.0)
# fit(Normal,X[3,:].+0.0)
# using GLM
# http://naobioml.blogspot.com/2017/02/how-to-fit-count-data-with-negative.html
data=X[3,:].+0.0;
function f2(x::Vector)
    # negative log-likelihood of NegativeBinomial(r = x[1], p = x[2])
    s = 0.0
    for i in data
        s += log(pdf(NegativeBinomial(x[1], x[2]), i))
    end
    return -s
end
using Optim, Distributions
lower = [0.0, 0.0]
upper = [Inf, 1]
initial_x = [0.5, 0.5]
# x2 = optimize(DifferentiableFunction(f2), initial_x, lower, upper, Fminbox(), optimizer = GradientDescent)
x2 = optimize(f2, [0.0, 0.0], [Inf, 1],[0.5, 0.5])
function f1(x)
    # negative log-likelihood of Poisson(x)
    s = 0.0
    for i in data
        s += log(pdf(Poisson(x), i))
    end
    return -s
end
x1 = optimize(f1, 0.0, 7)
#=
------------
using Distributions, Optim, StatsPlots
julia> data = vcat([0 for i in 1:70],
[1 for i in 1:38],
[2 for i in 1:17],
[3 for i in 1:10],
[4 for i in 1:9],
[5 for i in 1:3],
[6 for i in 1:2],
[7 for i in 1:1]);
julia> function f2(x)
sum = 0.0
for i in data
sum += log(pdf(NegativeBinomial(x[1], x[2]), i))
end
return -sum
end
f2 (generic function with 1 method)
julia> opt_result = optimize(f2, [0.0, 0.0], [Inf, 1],[0.5, 0.5])
* Status: success
* Candidate solution
Minimizer: [1.02e+00, 4.72e-01]
Minimum: 2.224372e+02
* Found with
Algorithm: Fminbox with L-BFGS
Initial Point: [5.00e-01, 5.00e-01]
* Convergence measures
|x - x'| = 0.00e+00 ≤ 0.0e+00
|x - x'|/|x'| = 0.00e+00 ≤ 0.0e+00
|f(x) - f(x')| = 0.00e+00 ≤ 0.0e+00
|f(x) - f(x')|/|f(x')| = 0.00e+00 ≤ 0.0e+00
|g(x)| = 7.79e-08 ≰ 1.0e-08
* Work counters
Seconds run: 0 (vs limit Inf)
Iterations: 4
f(x) calls: 226
∇f(x) calls: 226
julia> histogram(data, normalize = true, label = "Data", alpha = 0.5, linecolor = "white"); plot!(NegativeBinomial(opt_result.minimizer[1], opt_result.minimizer[2]), label = "Negative Binomial fit")
=# | ScRNAseq | https://github.com/jamesjcai/ScRNAseq.jl.git |
|
[
"MIT"
] | 0.2.0 | 90af6257edf8e0a40a94105db3bf4233a97f2136 | code | 765 | using Documenter, FresnelIntegrals
using Documenter.Remotes
DocMeta.setdocmeta!(FresnelIntegrals, :DocTestSetup, :(using FresnelIntegrals); recursive=true)
makedocs(;
modules=[FresnelIntegrals],
authors="Kiran Shila <[email protected]> and contributors",
repo=Remotes.GitHub("kiranshila","FresnelIntegrals.jl"),
sitename="FresnelIntegrals.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://kiranshila.github.io/FresnelIntegrals.jl",
edit_link="master",
assets=String[],
),
pages=[
"Home" => "index.md",
],
)
deploydocs(;
repo="github.com/kiranshila/FresnelIntegrals.jl.git",
devbranch="master",
) | FresnelIntegrals | https://github.com/kiranshila/FresnelIntegrals.jl.git |
|
[
"MIT"
] | 0.2.0 | 90af6257edf8e0a40a94105db3bf4233a97f2136 | code | 2590 | module FresnelIntegrals
using SpecialFunctions
using IrrationalConstants: sqrtπ
export fresnelc, fresnels, fresnel
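# The implementations below evaluate the standard identity
#     C(z) + im*S(z) = ((1 + im)/2) * erf((sqrtπ/2) * (1 - im) * z)
# with the real and imaginary parts expanded by hand, so each call needs at
# most two complex `erf` evaluations (and only one for real arguments).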
"""
fresnelc(z::Number)
Calculate the normalized Fresnel cosine integral
```math
C(z) = \\int_{0}^{z} \\cos{\\left(\\frac{\\pi t^2}{2}\\right)} \\, \\mathrm{d}t
```
for the number ``z``.
"""
function fresnelc(z::Number)
x = (z * sqrtπ) / 2
re_x, im_x = reim(x)
a = (re_x + im_x) + (im_x - re_x) * im
b = (re_x - im_x) + (im_x + re_x) * im
re_erf_a, im_erf_a = reim(erf(a))
re_erf_b, im_erf_b = reim(erf(b))
re_y = (re_erf_a - im_erf_a + re_erf_b + im_erf_b) / 4
im_y = (im_erf_a + re_erf_a - re_erf_b + im_erf_b) / 4
y = re_y + im_y * im
return y
end
function fresnelc(z::Real)
x = (z * sqrtπ) / 2
a = x + x * im
re_erf_a, im_erf_a = reim(erf(a))
y = (re_erf_a + im_erf_a) / 2
return y
end
"""
fresnels(z::Number)
Calculate the normalized Fresnel sine integral
```math
S(z) = \\int_{0}^{z} \\sin{\\left(\\frac{\\pi t^2}{2}\\right)} \\, \\mathrm{d}t
```
for the number ``z``.
"""
function fresnels(z::Number)
x = (z * sqrtπ) / 2
re_x, im_x = reim(x)
a = (re_x + im_x) + (im_x - re_x) * im
b = (re_x - im_x) + (im_x + re_x) * im
re_erf_a, im_erf_a = reim(erf(a))
re_erf_b, im_erf_b = reim(erf(b))
re_y = (re_erf_a + im_erf_a + re_erf_b - im_erf_b) / 4
im_y = (im_erf_a - re_erf_a + re_erf_b + im_erf_b) / 4
y = re_y + im_y * im
return y
end
function fresnels(z::Real)
x = (z * sqrtπ) / 2
a = x + x * im
re_erf_a, im_erf_a = reim(erf(a))
y = (re_erf_a - im_erf_a) / 2
return y
end
"""
fresnel(z::Number)
Calculate the normalized Fresnel cosine and sine integrals, returning the tuple `(C(z), S(z))`.
See also [`fresnels`](@ref), [`fresnelc`](@ref).
"""
function fresnel(z::Number)
x = (z * sqrtπ) / 2
re_x, im_x = reim(x)
a = (re_x + im_x) + (im_x - re_x) * im
b = (re_x - im_x) + (im_x + re_x) * im
re_erf_a, im_erf_a = reim(erf(a))
re_erf_b, im_erf_b = reim(erf(b))
re_y_sin = (re_erf_a + im_erf_a + re_erf_b - im_erf_b) / 4
im_y_sin = (im_erf_a - re_erf_a + re_erf_b + im_erf_b) / 4
re_y_cos = (re_erf_a - im_erf_a + re_erf_b + im_erf_b) / 4
im_y_cos = (im_erf_a + re_erf_a - re_erf_b + im_erf_b) / 4
y_sin = re_y_sin + im_y_sin * im
y_cos = re_y_cos + im_y_cos * im
return (y_cos, y_sin)
end
function fresnel(z::Real)
x = (z * sqrtπ) / 2
a = x + x * im
re_erf_a, im_erf_a = reim(erf(a))
y_sin = (re_erf_a - im_erf_a) / 2
y_cos = (re_erf_a + im_erf_a) / 2
return (y_cos, y_sin)
end
end # module | FresnelIntegrals | https://github.com/kiranshila/FresnelIntegrals.jl.git |
|
[
"MIT"
] | 0.2.0 | 90af6257edf8e0a40a94105db3bf4233a97f2136 | code | 1003 | using FresnelIntegrals
using Test
using QuadGK
@testset "FresnelIntegrals.jl" begin
# Generate random complex number
z = randn(ComplexF64)
# Test by comparing to numeric solution
@test fresnelc(z) ≈ quadgk(t->cos(π*t^2/2),0,z)[1]
@test fresnels(z) ≈ quadgk(t->sin(π*t^2/2),0,z)[1]
# Test just for code coverage 😄
@test (fresnelc(z),fresnels(z)) == fresnel(z)
# Generate random real number
z = randn(Float64)
# Test by comparing to numeric solution
@test fresnelc(z) ≈ quadgk(t->cos(π*t^2/2),0,z)[1]
@test fresnels(z) ≈ quadgk(t->sin(π*t^2/2),0,z)[1]
# Test just for code coverage 😄
@test (fresnelc(z),fresnels(z)) == fresnel(z)
# Precise values come from WolframAlpha calculator
# One could add more decimals and more tests if needed
@test fresnels(1.) ≈ 0.4382591473903
@test fresnelc(1.) ≈ 0.7798934003768
@test fresnels(sqrt(2)*im) ≈ -0.7139722140219*im
@test fresnelc(sqrt(2)*im) ≈ 0.5288915951112*im
end
| FresnelIntegrals | https://github.com/kiranshila/FresnelIntegrals.jl.git |
|
[
"MIT"
] | 0.2.0 | 90af6257edf8e0a40a94105db3bf4233a97f2136 | docs | 2252 | # FresnelIntegrals.jl
[](https://kiranshila.github.io/FresnelIntegrals.jl/stable/)
[](https://kiranshila.github.io/FresnelIntegrals.jl/dev/)
[](https://github.com/kiranshila/FresnelIntegrals.jl/actions/workflows/CI.yml?query=branch%3Amaster)
[](https://codecov.io/gh/kiranshila/FresnelIntegrals.jl)
A quick Julia library for calculating Fresnel Integrals using the error function from [SpecialFunctions.jl](https://github.com/JuliaMath/SpecialFunctions.jl).
```julia
using FresnelIntegrals
using Plots
z = -30:0.001:30
plot(x->real(fresnelc(x)), x->real(fresnels(x)),z,legend=false)
title!("Euler Spiral")
xlabel!("C(z)")
ylabel!("S(z)")
```

As expected, this is much faster than the numeric integral solution
```julia
using QuadGK
using BenchmarkTools
using FresnelIntegrals
julia> @benchmark fresnelc(1.8)
BenchmarkTools.Trial: 10000 samples with 772 evaluations.
Range (min … max): 161.448 ns … 1.744 μs ┊ GC (min … max): 0.00% … 0.00%
Time (median): 163.927 ns ┊ GC (median): 0.00%
Time (mean ± σ): 164.460 ns ± 16.519 ns ┊ GC (mean ± σ): 0.00% ± 0.00%
▃▄▂ ▁▇█▄ ▁ ▃▄▂▁ ▁▁▁ ▂
███▇▁▃▁▁▁▁▁▁▁▁▁▁▅▇▅█████▁▄▄▄▆▆▅▆▆█▇▆█████▇▆▅▅▇██████▇▇▆▇▇▆▅▆ █
161 ns Histogram: log(frequency) by time 168 ns <
Memory estimate: 0 bytes, allocs estimate: 0.
julia> @benchmark quadgk(t->cos(π*t^2/2),0,1.8)
BenchmarkTools.Trial: 10000 samples with 187 evaluations.
Range (min … max): 548.321 ns … 48.949 μs ┊ GC (min … max): 0.00% … 98.43%
Time (median): 554.893 ns ┊ GC (median): 0.00%
Time (mean ± σ): 599.010 ns ± 558.147 ns ┊ GC (mean ± σ): 2.25% ± 3.29%
▅█▆▂▄▃▃▁▁▁▁▁ ▂▁ ▁▁▁▁▂▁ ▁
███████████████▇▅▆▅▄▃▅▄▅▅▅▄▄▃▄████████▇▇▆▆▇▆▆▆▇▇▇▇▇█████████▇ █
548 ns Histogram: log(frequency) by time 754 ns <
Memory estimate: 368 bytes, allocs estimate: 2.
``` | FresnelIntegrals | https://github.com/kiranshila/FresnelIntegrals.jl.git |
|
[
"MIT"
] | 0.2.0 | 90af6257edf8e0a40a94105db3bf4233a97f2136 | docs | 287 | ```@meta
CurrentModule = FresnelIntegrals
```
# FresnelIntegrals
A quick Julia library for calculating Fresnel Integrals using the error function from [SpecialFunctions.jl](https://github.com/JuliaMath/SpecialFunctions.jl).
## Functions
```@autodocs
Modules = [FresnelIntegrals]
```
| FresnelIntegrals | https://github.com/kiranshila/FresnelIntegrals.jl.git |
|
[
"MIT"
] | 1.1.0 | 88eb6c42d1fc48144dfd2bb69f359ef6fb0adb59 | code | 204 | push!(LOAD_PATH, "../src/")
using Documenter
using BasicDataLoaders
makedocs(sitename="BasicDataLoaders")
deploydocs(repo = "github.com/lucasondel/BasicDataLoaders.git",
devbranch = "main")
| BasicDataLoaders | https://github.com/lucasondel/BasicDataLoaders.git |
|
[
"MIT"
] | 1.1.0 | 88eb6c42d1fc48144dfd2bb69f359ef6fb0adb59 | code | 765 | # DataLoaders - Basic data loaders for training machine learning
# models
#
# Lucas Ondel 2020
module BasicDataLoaders
using BSON
#######################################################################
# Basic input for loading / saving data, models, ...
export load
export save
include("io.jl")
#######################################################################
# Abstract data loader
export AbstractDataLoader
"""
    abstract type AbstractDataLoader{T} end
Base type for all the data loaders.
"""
abstract type AbstractDataLoader{T} end
# Subtypes should implement the Iteration and Indexing interfaces
#######################################################################
# Concrete data loaders
export DataLoader
include("dataloader.jl")
end
| BasicDataLoaders | https://github.com/lucasondel/BasicDataLoaders.git |
|
[
"MIT"
] | 1.1.0 | 88eb6c42d1fc48144dfd2bb69f359ef6fb0adb59 | code | 2726 | # DataLoaders - Concrete subtypes of AbstractDataLoader
#
# Lucas Ondel 2020
function Base.show(io::IO, dl::AbstractDataLoader)
println(io, "$(typeof(dl))")
println(io, " data: $(typeof(dl.data))")
print(io, " batchsize: $(dl.batchsize)")
end
# index into `data` of the first element of the `i`-th batch
_index(i, batchsize) = (i-1) * batchsize + 1
#######################################################################
# DataLoader
"""
struct DataLoader
data
batchsize
end
# Constructor
    DataLoader(data; batchsize = 1, preprocess = x -> x,
               preprocess_element = x -> x)
where `data` is a sequence of elements to iterate over, `batchsize` is
the size of each batch, `preprocess` is a user-defined function to
apply on each batch and `preprocess_element` is a user-defined function
to apply on each batch's element. By default, `preprocess` and
`preprocess_element` are simply the identity function.
!!! warning
When iterating, the final batch may have a size smaller
than `batchsize`.
"""
struct DataLoader{T<:AbstractVector} <: AbstractDataLoader{T}
data::T
batchsize::UInt
fbatch::Function
felement::Function
function DataLoader(data::AbstractVector; batchsize = 1, preprocess = x -> x,
preprocess_element = x -> x)
length(data) > 0 || throw(ArgumentError("cannot create a DataLoader from an empty collection"))
batchsize >= 1 || throw(ArgumentError("`batchsize = $batchsize` should greater or equal to 1"))
new{typeof(data)}(data, batchsize, preprocess, preprocess_element)
end
end
function Base.iterate(dl::DataLoader, state = 1)
if state > size(dl.data, 1)
return nothing
end
offset = min(state+dl.batchsize-1, size(dl.data,1))
dl.fbatch(dl.felement.(dl.data[state:offset])), offset+1
end
Base.length(dl::DataLoader) = UInt(ceil(size(dl.data, 1)/dl.batchsize))
Base.eltype(dl::DataLoader) = eltype(dl.data)
function Base.getindex(dl::DataLoader, i)
1 <= i <= length(dl) || throw(BoundsError(dl, i))
start = _index(i, dl.batchsize)
offset = min(start + dl.batchsize - 1, size(dl.data,1))
dl.fbatch(dl.felement.(dl.data[start:offset]))
end
function Base.getindex(dl::DataLoader, ur::UnitRange)
1 <= ur.start <= length(dl) || throw(BoundsError(dl, ur.start))
1 <= ur.stop <= length(dl) || throw(BoundsError(dl, ur.stop))
N = size(dl.data, 1)
start = _index(ur.start, dl.batchsize)
offset = min(_index(ur.stop, dl.batchsize) + dl.batchsize - 1, N)
DataLoader(dl.data[start:offset], batchsize = dl.batchsize,
preprocess = dl.fbatch, preprocess_element = dl.felement)
end
Base.firstindex(dl::DataLoader) = 1
Base.lastindex(dl::DataLoader) = length(dl)
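# Illustrative usage (the directory name is hypothetical):
#   dl = DataLoader(readdir("data"; join=true); batchsize = 32,
#                   preprocess_element = load)
#   for batch in dl
#       # `batch` is a vector of up to 32 loaded objects
#   end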
| BasicDataLoaders | https://github.com/lucasondel/BasicDataLoaders.git |
|
[
"MIT"
] | 1.1.0 | 88eb6c42d1fc48144dfd2bb69f359ef6fb0adb59 | code | 950 | # DataLoaders - Basic input/output operations
#
# Lucas Ondel 2020
"""
save(path, obj)
Write `obj` to file `path` in the [BSON format](http://bsonspec.org/).
The intermediate directories are created if they do not exists.
If `path` does not end with the extension ".bson", the extension is
appended to the output path. The function returns the type of the
object saved. See [`load`](@ref) to load this file again.
"""
function save(path, obj)
if ! endswith(path, ".bson")
path *= ".bson"
end
mkpath(dirname(path))
T = typeof(obj)
bson(path, data = obj, type = T)
T
end
"""
load(path)
Load a julia object saved in `path` with the function [`save`](@ref).
If `path` does not end with the extension ".bson", the extension is
appended to input path.
"""
function load(path)
if ! endswith(path, ".bson")
path *= ".bson"
end
dict = BSON.load(path)
convert(dict[:type], dict[:data])
end
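# Illustrative round trip (the path is hypothetical):
#   save("results/model", Dict(:w => [1.0, 2.0])) # writes results/model.bson
#   load("results/model")                         # returns the saved Dict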
| BasicDataLoaders | https://github.com/lucasondel/BasicDataLoaders.git |
|
[
"MIT"
] | 1.1.0 | 88eb6c42d1fc48144dfd2bb69f359ef6fb0adb59 | code | 1885 |
using BasicDataLoaders
using Distributed
using Documenter
using Test
doctest(BasicDataLoaders)
@testset "input/output operations" begin
obj = Float32[1, 2, 3]
dir = mktempdir(cleanup = true)
path = joinpath(dir, "test")
T = save(path*".bson", obj)
@test T == Array{Float32, 1}
@test isfile(path*".bson")
lobj = load(path*".bson")
@test typeof(lobj) == Array{Float32, 1}
@test all(lobj .≈ obj)
save(path, obj)
@test ! isfile(path)
@test isfile(path*".bson")
lobj = load(path*".bson")
@test typeof(lobj) == Array{Float32, 1}
@test all(lobj .≈ obj)
end
@testset "Data loader" begin
obj = Float32[i for i in 1:10]
dl = DataLoader(obj, batchsize = 1)
@test typeof(dl) == DataLoader{Array{Float32, 1}}
@test_throws ArgumentError DataLoader(obj, batchsize = 0)
@test_throws ArgumentError DataLoader(obj, batchsize = -1)
@test_throws ArgumentError DataLoader([], batchsize = 1)
dl = DataLoader(obj, batchsize = 3)
@test length(dl) == 4
@test all(dl[1] .== [1, 2, 3])
@test all(dl[2] .== [4, 5, 6])
@test all(dl[3] .== [7, 8, 9])
@test all(dl[4] .== [10])
@test all(dl[1] .== dl[begin])
@test all(dl[4] .== dl[end])
sobj = [[1, 1], [2, 2], [3, 3]]
v = [3, 4]
sdl = DataLoader(obj, batchsize = 2)
sdl2 = DataLoader(obj, batchsize = 2, preprocess_element = x -> v .* x)
for (i, batch) in enumerate(sdl2)
@test all((sdl2[i]) .== [v .* a for a in sdl[i]])
end
dl2 = DataLoader(obj, batchsize = 3, preprocess = x -> 2 .* x)
for (i, batch) in enumerate(dl2)
@test all((2 .* dl[i]) .== dl2[i])
end
N = 10
dl = DataLoader(1:10, batchsize = 3, preprocess = x -> 2*x)
addprocs(2)
@everywhere using BasicDataLoaders
res = @distributed (+) for x in dl
sum(x)
end
@test res == N*(N+1)
end
| BasicDataLoaders | https://github.com/lucasondel/BasicDataLoaders.git |
|
[
"MIT"
] | 1.1.0 | 88eb6c42d1fc48144dfd2bb69f359ef6fb0adb59 | docs | 839 | # BasicDataLoaders
*Julia package providing a simple data loader to train machine learning
systems.*
| **Documentation** | **Test Status** |
|:------------------:|:-----------------:|
| [](https://lucasondel.github.io/BasicDataLoaders/stable) [](https://lucasondel.github.io/BasicDataLoaders/dev) |  |
## Installation
The package can be installed with the Julia package manager. From the Julia REPL, type ] to enter the Pkg REPL mode and run:
```julia
pkg> add BasicDataLoaders
```
Have a look at the [documentation](https://lucasondel.github.io/BasicDataLoaders/stable/) to get started!
## Authors
Lucas Ondel, Brno University of Technology, 2020
| BasicDataLoaders | https://github.com/lucasondel/BasicDataLoaders.git |
|
[
"MIT"
] | 1.1.0 | 88eb6c42d1fc48144dfd2bb69f359ef6fb0adb59 | docs | 2255 | # BasicDataLoaders
Julia package providing a simple data loader to train machine learning
systems.
The source code of the project is available on [github](https://github.com/lucasondel/BasicDataLoaders).
## Authors
Lucas Ondel, Brno University of Technology, 2020
## Installation
The package can be installed with the Julia package manager. From the
Julia REPL, type `]` to enter the Pkg REPL mode and run:
```julia
pkg> add BasicDataLoaders
```
## API
The package provide a simple data loader object:
```@docs
DataLoader
```
!!! note
`DataLoder` supports the iterating and indexing interface and,
consequently, it can be used in [distributed for
loops](https://docs.julialang.org/en/v1/manual/distributed-computing/).
Because it is very common for data loaders to load data from disk, the
package also provides two convenience functions to easily read and
write files:
```@docs
save
load
```
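For example, a minimal round trip (the path below is only illustrative):

```julia
using BasicDataLoaders

save("/tmp/example", [1.0, 2.0, 3.0]) # writes /tmp/example.bson
load("/tmp/example")                  # returns [1.0, 2.0, 3.0]
```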
## Examples
Here is a complete example that simply print the batches:
```jldoctest
julia> using BasicDataLoaders
julia> dl = DataLoader(1:10, batchsize = 3)
DataLoader{UnitRange{Int64}}
data: UnitRange{Int64}
batchsize: 3
julia> for batch in dl println(batch) end
[1, 2, 3]
[4, 5, 6]
[7, 8, 9]
[10]
```
Here is another example that computes the sum of all even numbers
between 2 and 200 inclusive:
```jldoctest
julia> using BasicDataLoaders
julia> dl = DataLoader(1:100, batchsize = 10, preprocess = x -> 2*x)
DataLoader{UnitRange{Int64}}
data: UnitRange{Int64}
batchsize: 10
julia> sum(sum(batch) for batch in dl)
10100
```
Finally, here is an example simulating loading data from files. In
practice, you can replace the printing function with the [`load`](@ref)
function.
```jldoctest
julia> using BasicDataLoaders
julia> files = ["file1.bson", "file2.bson", "file3.bson"]
3-element Array{String,1}:
"file1.bson"
"file2.bson"
"file3.bson"
julia> dl = DataLoader(files, batchsize = 2, preprocess = x -> println("load and merge files $x"))
DataLoader{Array{String,1}}
data: Array{String,1}
batchsize: 2
julia> for batch in dl println("do something on this batch") end
load and merge files ["file1.bson", "file2.bson"]
do something on this batch
load and merge files ["file3.bson"]
do something on this batch
```
| BasicDataLoaders | https://github.com/lucasondel/BasicDataLoaders.git |
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | code | 1111 | push!(LOAD_PATH,"../src/")
using Documenter, PowerModelsADA
makedocs(
modules = [PowerModelsADA],
sitename = "PowerModelsADA.jl",
authors = "Mohannad Alkhraijah",
format = Documenter.HTML(
analytics = "",
mathengine = Documenter.MathJax(),
collapselevel=1,
),
pages = [
"Home" => "index.md",
"Manual" => [
"Quick Start Guide" => "quickguide.md",
"Data Structure" => "data_structure.md",
"Technical Specifications" => "specification.md" ,
"Distributed Algorithms" => [
"ADMM" => "admm.md",
"ATC" => "atc.md",
"APP" => "app.md",
"ALADIN" => "aladin.md",
"Adaptive ADMM" => "adaptive_admm.md"
]
],
"Tutorials" => [
"Using PowerModelsADA" => "tutorial.md",
"User-defined Algorithm" => "newalgorithm.md"
],
"Library" => "library.md",
"Comparison Results" => "comparison.md"
]
)
deploydocs(
repo = "github.com/mkhraijah/PowerModelsADA.jl"
)
| PowerModelsADA | https://github.com/mkhraijah/PowerModelsADA.jl.git |
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | code | 2925 | ## Import package
using PowerModelsADA
using Ipopt
## Read case with partition file and return dictionary of the partitioned case
case_path = "test/data/case14.m"
parition_file_path = "test/data/case14_2areas.csv"
data = parse_file(case_path)
assign_area!(data, parition_file_path)
## Settings and optimizer initialization
max_iteration = 1000
tol = 1e-3
optimizer = optimizer_with_attributes(Ipopt.Optimizer, "print_level"=>0)
model_type = ACPPowerModel
## Distributed algorithm
## ADMM with fully distributed structure
data_area = solve_dopf_admm(data, model_type, optimizer, tol=tol, max_iteration=max_iteration, print_level = 1, alpha=1000, save_data=["solution", "mismatch"], multiprocessors=false)
error_admm = compare_solution(data, data_area, model_type, optimizer)
## Adaptive ADMM with fully distributed structure
data_area = solve_dopf_adaptive_admm(data, model_type, optimizer, tol=tol, max_iteration=max_iteration, print_level = 1, alpha=100.0, mu_inc=1.05, mu_dec=1.05, eta_inc=0.05, eta_dec=0.02, save_data=["solution", "mismatch"])
error_adaptive_admm = compare_solution(data, data_area, model_type, optimizer)
## APP with fully distributed structure
data_area = solve_dopf_app(data, model_type, optimizer; tol=tol, max_iteration=max_iteration, print_level = 1, alpha=1000, save_data=["solution", "mismatch"])
error_app = compare_solution(data, data_area, model_type, optimizer)
## ATC with fully distributed structure
data_area = solve_dopf_atc(data, model_type, optimizer; tol=tol, max_iteration=max_iteration, print_level = 1, alpha=1.1)
error_atc = compare_solution(data, data_area, model_type, optimizer)
## ADMM with central coordinator structure
data_area = solve_dopf_admm_coordinated(data, model_type, optimizer; tol=tol, max_iteration=max_iteration, print_level = 1, alpha = 100);
error_admm_coordinated = compare_solution(data, data_area, model_type, optimizer)
## Adaptive ADMM with central coordinator structure
data_area = solve_dopf_adaptive_admm_coordinated(data, model_type, optimizer, tol=tol, max_iteration=max_iteration, print_level = 1, alpha=100.0, mu_inc=1.1, mu_dec=1.05, eta_inc=0.05, eta_dec=0.02, save_data=["solution", "mismatch"])
error_adaptive_admm_coordinated = compare_solution(data, data_area, model_type, optimizer)
## ATC with central coordinator structure
data_area = solve_dopf_atc_coordinated(data, model_type, optimizer; max_iteration=max_iteration, print_level = 1, alpha = 1.05)
error_atc = compare_solution(data, data_area, model_type, optimizer)
## ALADIN with central coordinator structure
sigma = Dict{String, Real}("va" => 10, "vm" => 5, "pf" => 1, "pt" => 1, "qf" => 1, "qt" => 1, "pg" => 1, "qg" => 1)
data_area = solve_dopf_aladin_coordinated(data, model_type, optimizer; tol=tol, max_iteration=max_iteration, print_level=1, p=100, mu=1000, r_p=1.5, r_mu=2, q_gamma=0, sigma=sigma)
error_aladin = compare_solution(data, data_area, model_type, optimizer) | PowerModelsADA | https://github.com/mkhraijah/PowerModelsADA.jl.git |
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | code | 2789 | ###############################################################################
# Build methods for the XX algorithm #
###############################################################################
"""
template for xx distributed algorithm
"""
module xx_methods
using ..PowerModelsADA
"solve distributed OPF using XX algorithm"
function solve_method(data, model_type::DataType, optimizer;
mismatch_method::String="norm", tol::Float64=1e-4, max_iteration::Int64=1000,
print_level::Int64=1, parameters...)
solve_dopf(data, model_type, optimizer, xx_methods;
mismatch_method=mismatch_method, tol=tol, max_iteration=max_iteration,
print_level=print_level, parameters...)
end
"initialize the XX algorithm"
function initialize_method(data::Dict{String, <:Any}, model_type::Type; tol::Float64=1e-4, max_iteration::Int64=1000, kwargs...)
# initiate primal and dual shared variables
data["shared_variable"] = Dict(to_area=> variable_name=>value)
data["received_variable"] = Dict(from_area=> variable_name=>value)
# distributed algorithm settings
initialize_dopf!(data, model_type; kwargs...)
# xx parameters
data["parameter"] = Dict("alpha"=> get(kwargs, :alpha, 1000))
end
"build PowerModel using xx algorithm"
function build_method(pm::AbstractPowerModel)
# define variables
variable_opf(pm)
# define constraints
constraint_opf(pm)
# define objective function
objective_min_fuel_and_consensus!(pm, objective_function)
end
"set the xx algorithm objective"
function objective_function(pm::AbstractPowerModel)
###
objective = 0
###
return objective
end
"update the xx algorithm data after each iteration"
function update_method(data::Dict{String, <:Any})
###
###
calc_mismatch!(data)
update_flag_convergence!(data)
save_solution!(data)
update_iteration!(data)
end
end
"""
solve_dopf_xx(data::Dict{String, <:Any}, model_type::DataType, optimizer;
    mismatch_method::String="norm", tol::Float64=1e-4, max_iteration::Int64=1000,
    print_level::Int64=1, parameters)
Solve the distributed OPF problem using the xx algorithm.
# Arguments:
- data::Dict{String, <:Any} : dictionary containing the case in PowerModels format
- model_type::DataType : power flow formulation (PowerModel type)
- optimizer : JuMP optimizer initialization object
- mismatch_method::String="norm" : mismatch calculation method (norm, max)
- tol::Float64=1e-4 : mismatch tolerance
- max_iteration::Int64=1000 : maximum number of iterations
- print_level::Int64=1 : print mismatch after each iteration and result summary
"""
solve_dopf_xx = xx_methods.solve_method
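# A hypothetical usage sketch of the template (`xx` and its parameters are
# placeholders for the actual algorithm; the call pattern mirrors the other
# solve_dopf_* methods):
#
# data = parse_file("case14.m")
# assign_area!(data, "case14_2areas.csv")
# data_area = solve_dopf_xx(data, ACPPowerModel, optimizer, tol=1e-4, max_iteration=1000)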
# export the algorithm methods module and solve method
export xx_methods, solve_dopf_xx | PowerModelsADA | https://github.com/mkhraijah/PowerModelsADA.jl.git |
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | code | 1634 | module PowerModelsADA
import JuMP
import PowerModels
import InfrastructureModels
import Serialization
import LinearAlgebra
import DelimitedFiles
import SparseArrays
import Suppressor: @capture_out
import Distributed
import PowerModels: AbstractPowerModel, parse_file, ids, ref, var, con, sol, nw_ids, nws, optimize_model!, update_data!, ref_add_core!, pm_it_sym, pm_it_name, nw_id_default, ismultinetwork, ismulticonductor, silence
const _PM = PowerModels
const _IM = InfrastructureModels
# const _pmada_global_keys = Set(["time_series", "per_unit", "parameter", "option", "solution", "local_solution", "shared_variable", "received_variable", "received_delta", "dual_variable", "received_dual_variable", "shared_sensitivities", "shared_dual_variable", "dual_residual", "mismatch", "dual_residual", "counter"])
const _pmada_global_keys = Set(["time_series", "per_unit", "parameter", "option", "solution", "mismatch", "counter", "previous_solution", "shared_flag_convergence", "received_flag_convergence", "shared_convergence_iteration", "received_convergence_iteration"])
include("core/base.jl")
include("core/variables.jl")
include("core/opf.jl")
include("core/data.jl")
include("core/data_sharing.jl")
include("core/util.jl")
include("core/export.jl")
include("algorithms/admm_methods.jl")
include("algorithms/atc_methods.jl")
include("algorithms/app_methods.jl")
include("algorithms/admm_coordinated_methods.jl")
include("algorithms/atc_coordinated_methods.jl")
include("algorithms/aladin_coordinated_methods.jl")
include("algorithms/adaptive_admm_methods.jl")
include("algorithms/adaptive_admm_coordinated_methods.jl")
end | PowerModelsADA | https://github.com/mkhraijah/PowerModelsADA.jl.git |
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | code | 11113 | ###############################################################################
# Build methods for adaptive ADMM algorithm #
###############################################################################
"""
adaptive ADMM with coordinator algorithm module containing build and update methods
"""
module adaptive_admm_coordinated_methods
using ..PowerModelsADA
using LinearAlgebra
"solve distributed OPF using adaptive ADMM algorithm"
function solve_method(data, model_type::DataType, optimizer; kwargs...)
solve_dopf_coordinated(data, model_type, optimizer, adaptive_admm_coordinated_methods; kwargs...)
end
"initialize the adaptive ADMM algorithm"
function initialize_method_local(data::Dict{String, <:Any}, model_type::DataType; kwargs...)
area_id =get_area_id(data)
areas_id = get_areas_id(data)
deleteat!(areas_id, areas_id .== area_id) # remove the same area from the list of areas_id
initialization_method = get(kwargs, :initialization_method, "flat")
# primal and dual shared variables
data["shared_variable"] = initialize_shared_variable(data, model_type, area_id, 0, "shared_variable", initialization_method)
data["received_variable"] = initialize_shared_variable(data, model_type, area_id, 0, "received_variable", initialization_method)
data["dual_variable"] = initialize_shared_variable(data, model_type, area_id, 0, "dual_variable", initialization_method)
data["dual_residual"] = Dict{String, Any}()
initialize_dopf!(data, model_type; kwargs...)
# adaptive ADMM parameters
alpha = Float64(get(kwargs, :alpha, 1000.0))
data["parameter"] = Dict("alpha"=> alpha)
data["received_parameter"]= Dict{String, Any}("0" => data["parameter"]["alpha"])
# adaptive ADMM dual residual dictionary
if haskey(data, "previous_solution")
for str in ["shared_variable", "received_variable"]
if !haskey(data["previous_solution"], str)
data["previous_solution"][str]= Vector{Dict}()
end
end
else
data["previous_solution"]= Dict([str=> Vector{Dict}() for str in ["shared_variable", "received_variable"]])
end
# adaptive ADMM dual residual tolerance
if data["option"]["termination_measure"] in ["dual_residual", "mismatch_dual_residual"]
data["option"]["tol_dual"] = get(kwargs, :tol_dual, data["option"]["tol"])
end
end
"initialize the adaptive ADMM algorithm"
function initialize_method_coordinator(data::Dict{String, <:Any}, model_type::DataType; kwargs...)
area_id =get_area_id(data)
areas_id = get_areas_id(data)
initialization_method = get(kwargs, :initialization_method, "flat")
# primal and dual shared variables
data["shared_variable"] = initialize_shared_variable(data, model_type, area_id, areas_id, "shared_variable", initialization_method)
data["received_variable"] = initialize_shared_variable(data, model_type, area_id, areas_id, "received_variable", initialization_method)
data["dual_variable"] = initialize_shared_variable(data, model_type, area_id, areas_id, "dual_variable", initialization_method)
data["dual_residual"] = Dict{String, Any}()
# distributed algorithm settings
initialize_dopf!(data, model_type; kwargs...)
# adaptive ADMM dual residual dictionary
if haskey(data, "previous_solution")
for str in unique(["shared_variable", "received_variable",keys(data["previous_solution"])...])
if !haskey(data["previous_solution"], str)
data["previous_solution"][str]= Vector{Dict}()
end
end
else
data["previous_solution"]= Dict([str=> Vector{Dict}() for str in ["shared_variable", "received_variable"]])
end
# adaptive ADMM dual residual tolerance
if data["option"]["termination_measure"] in ["dual_residual", "mismatch_dual_residual"]
data["option"]["tol_dual"] = get(kwargs, :tol_dual, data["option"]["tol"])
end
# adaptive ADMM parameters
alpha = Float64(get(kwargs, :alpha, 1000))
alpha_max = Float64(get(kwargs, :alpha_max, 1e8))
alpha_min = Float64(get(kwargs, :alpha_min, 1))
mu_inc = Float64(get(kwargs, :mu_inc, 2.5))
mu_dec = Float64(get(kwargs, :mu_dec, 2.5))
eta_inc = Float64(get(kwargs, :eta_inc, 0.1))
eta_dec = Float64(get(kwargs, :eta_dec, 0.1))
data["parameter"] = Dict("alpha"=> alpha,"mu_inc"=> mu_inc, "mu_dec"=> mu_dec, "eta_inc"=> eta_inc, "eta_dec"=>eta_dec, "alpha_max"=>alpha_max, "alpha_min"=>alpha_min)
data["shared_parameter"] = Dict(string(area) => data["parameter"]["alpha"] for area in areas_id)
end
"build PowerModel object for the adaptive ADMM algorithm"
function build_method_local(pm::AbstractPowerModel)
# define variables
variable_opf(pm)
# define constraints
constraint_opf(pm)
# define objective function
objective_min_fuel_and_consensus!(pm, objective_adaptive_admm_local)
end
"build PowerModel object for the ADMM algorithm coordinator"
function build_method_coordinator(pm::AbstractPowerModel)
# define variables
variable_opf(pm)
# define objective function
objective_min_fuel_and_consensus!(pm, objective_adaptive_admm_coordinator)
end
"adaptive ADMM algorithm objective function"
function objective_adaptive_admm_local(pm::AbstractPowerModel)
# parameters
alpha = pm.data["parameter"]["alpha"]
# data
shared_variable_received = pm.data["received_variable"]
dual_variable = pm.data["dual_variable"]
##objective function
objective = 0
for area in keys(dual_variable)
for variable in keys(dual_variable[area])
for idx in keys(dual_variable[area][variable])
v = PowerModelsADA._var(pm, variable, idx)
v_central = shared_variable_received[area][variable][idx]
v_dual = dual_variable[area][variable][idx]
objective += alpha/2 * (v - v_central)^2 + v_dual * (v - v_central)
end
end
end
return objective
end
"adaptive ADMM algorithm objective function"
function objective_adaptive_admm_coordinator(pm::AbstractPowerModel)
# parameters
alpha = pm.data["parameter"]["alpha"]
# data
shared_variable_received = pm.data["received_variable"]
dual_variable = pm.data["dual_variable"]
##objective function
objective = 0
for area in keys(dual_variable)
for variable in keys(dual_variable[area])
for idx in keys(dual_variable[area][variable])
v = PowerModelsADA._var(pm, variable, idx)
v_central = shared_variable_received[area][variable][idx]
v_dual = dual_variable[area][variable][idx]
objective += alpha/2 * (v - v_central)^2 + v_dual * (v - v_central)
end
end
end
return objective
end
"update the adaptive ADMM algorithm data after each iteration"
function update_method_local(data::Dict{String, <:Any})
# parameters
data["parameter"]["alpha"] = data["received_parameter"]["0"]
alpha = data["parameter"]["alpha"]
# data
shared_variable_local = data["shared_variable"]
shared_variable_received = data["received_variable"]
dual_variable = deepcopy(data["dual_variable"])
# update dual variable
for area in keys(dual_variable)
for variable in keys(dual_variable[area])
for idx in keys(dual_variable[area][variable])
v_primal = shared_variable_local[area][variable][idx]
v_central = shared_variable_received[area][variable][idx]
v_dual = dual_variable[area][variable][idx]
data["dual_variable"][area][variable][idx]= v_dual + alpha * (v_primal - v_central)
end
end
end
calc_dual_residual!(data)
calc_mismatch!(data)
update_flag_convergence!(data)
save_solution!(data)
update_iteration!(data)
end
"update the adaptive ADMM algorithm data after each iteration"
function update_method_coordinator(data::Dict{String, <:Any})
# parameters
alpha = deepcopy(data["parameter"]["alpha"])
alpha_max = data["parameter"]["alpha_max"]
alpha_min = data["parameter"]["alpha_min"]
mu_inc = data["parameter"]["mu_inc"]
mu_dec = data["parameter"]["mu_dec"]
eta_inc = data["parameter"]["eta_inc"]
eta_dec = data["parameter"]["eta_dec"]
# data
shared_variable_local = data["shared_variable"]
shared_variable_received = data["received_variable"]
dual_variable = deepcopy(data["dual_variable"])
# update dual variable
for area in keys(dual_variable)
for variable in keys(dual_variable[area])
for idx in keys(dual_variable[area][variable])
v_primal = shared_variable_local[area][variable][idx]
v_central = shared_variable_received[area][variable][idx]
v_dual = dual_variable[area][variable][idx]
data["dual_variable"][area][variable][idx]= v_dual + alpha * (v_primal - v_central)
end
end
end
calc_dual_residual!(data)
calc_mismatch!(data)
## update adaptive ADMM parameters
if data["mismatch"]["0"] > mu_inc * data["dual_residual"]["0"]
alpha = alpha * ( 1 + eta_inc)
elseif data["dual_residual"]["0"] > mu_dec * data["mismatch"]["0"]
alpha = alpha / ( 1 + eta_dec)
end
if alpha > alpha_max
alpha = alpha_max
elseif alpha < alpha_min
alpha = alpha_min
end
data["parameter"]["alpha"] = alpha
for area in keys(data["shared_parameter"])
data["shared_parameter"][area] = alpha
end
update_flag_convergence!(data)
save_solution!(data)
update_iteration!(data)
end
post_processors_local = [update_solution!, update_shared_variable!]
post_processors_coordinator = [update_solution!, update_shared_variable!]
push!(_pmada_global_keys, "shared_parameter", "shared_variable", "received_variable", "dual_variable", "dual_residual")
end
"""
    solve_dopf_adaptive_admm_coordinated(data::Dict{String, <:Any}, model_type::DataType, optimizer;
    mismatch_method::String="norm", tol::Float64=1e-4, max_iteration::Int64=1000,
    print_level::Int64=1, print_optimizer_info::Bool=false, alpha::Real=1000)
Solve the distributed OPF problem using the adaptive ADMM algorithm with central coordinator.
# Arguments:
- data::Dict{String, <:Any} : dictionary containing the case in PowerModels format
- model_type::DataType : power flow formulation (PowerModel type)
- optimizer : JuMP optimizer initialization object
- mismatch_method::String="norm" : mismatch calculation method (norm, max)
- tol::Float64=1e-4 : mismatch tolerance
- max_iteration::Int64=1000 : maximum number of iterations
- print_level::Int64=1 : 0 - no print, 1 - print mismatch after each iteration and result summary, 2 - print optimizer output
- alpha::Real=1000 : initial value of the algorithm parameter
"""
solve_dopf_adaptive_admm_coordinated = adaptive_admm_coordinated_methods.solve_method
# export the algorithm methods module and solve method
export adaptive_admm_coordinated_methods, solve_dopf_adaptive_admm_coordinated | PowerModelsADA | https://github.com/mkhraijah/PowerModelsADA.jl.git |
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | code | 10040 | ###############################################################################
# Build methods for adaptive ADMM algorithm #
###############################################################################
"""
adaptive ADMM algorithm module containing build and update methods
"""
module adaptive_admm_methods
using ..PowerModelsADA
using LinearAlgebra
"solve distributed OPF using adaptive ADMM algorithm"
function solve_method(data, model_type::DataType, optimizer; kwargs...)
solve_dopf(data, model_type, optimizer, adaptive_admm_methods; kwargs...)
end
"initialize the adaptive ADMM algorithm"
function initialize_method(data::Dict{String, <:Any}, model_type::DataType; kwargs...)
area_id =get_area_id(data)
areas_id = get_areas_id(data)
deleteat!(areas_id, areas_id .== area_id) # remove the same area from the list of areas_id
initialization_method = get(kwargs, :initialization_method, "flat")
# primal and dual shared variables
data["shared_variable"] = initialize_shared_variable(data, model_type, area_id, areas_id, "shared_variable", initialization_method)
data["received_variable"] = initialize_shared_variable(data, model_type, area_id, areas_id, "received_variable", initialization_method)
data["dual_variable"] = initialize_shared_variable(data, model_type, area_id, areas_id, "dual_variable", initialization_method)
data["dual_residual"] = Dict{String, Any}()
# distributed algorithm settings
initialize_dopf!(data, model_type; kwargs...)
# adaptive ADMM dual residual dictionary
if haskey(data, "previous_solution")
for str in unique(["shared_variable", "received_variable",keys(data["previous_solution"])...])
if !haskey(data["previous_solution"], str)
data["previous_solution"][str]= Vector{Dict}()
end
end
else
data["previous_solution"]= Dict([str=> Vector{Dict}() for str in ["shared_variable", "received_variable"]])
end
# adaptive ADMM dual residual tolerance
if data["option"]["termination_measure"] in ["dual_residual", "mismatch_dual_residual"]
data["option"]["tol_dual"] = get(kwargs, :tol_dual, data["option"]["tol"])
end
# adaptive ADMM parameters
alpha = Float64(get(kwargs, :alpha, 1000))
alpha_max = Float64(get(kwargs, :alpha_max, 1e8))
alpha_min = Float64(get(kwargs, :alpha_min, 1))
mu_inc = Float64(get(kwargs, :mu_inc, 2))
mu_dec = Float64(get(kwargs, :mu_dec, 2))
eta_inc = Float64(get(kwargs, :eta_inc, 0.2))
eta_dec = Float64(get(kwargs, :eta_dec, 0.2))
data["parameter"] = Dict("alpha"=> alpha,"mu_inc"=> mu_inc, "mu_dec"=> mu_dec, "eta_inc"=> eta_inc, "eta_dec"=>eta_dec, "alpha_max"=>alpha_max, "alpha_min"=>alpha_min)
data["alpha"] = initialize_shared_variable(data, model_type, area_id, areas_id, "parameter", "constant", alpha)
end
"build PowerModel object for the adaptive ADMM algorithm"
function build_method(pm::AbstractPowerModel)
# define variables
variable_opf(pm)
# define constraints
constraint_opf(pm)
# define objective function
objective_min_fuel_and_consensus!(pm, objective_adaptive_admm)
end
"adaptive ADMM algorithm objective function"
function objective_adaptive_admm(pm::AbstractPowerModel)
# parameters
alphas = pm.data["alpha"]
# data
shared_variable_local = pm.data["shared_variable"]
shared_variable_received = pm.data["received_variable"]
dual_variable = pm.data["dual_variable"]
##objective function
objective = 0
for area in keys(dual_variable)
for variable in keys(dual_variable[area])
for idx in keys(dual_variable[area][variable])
v = PowerModelsADA._var(pm, variable, idx)
v_central = (shared_variable_local[area][variable][idx] + shared_variable_received[area][variable][idx])/2
v_dual = dual_variable[area][variable][idx]
alpha = alphas[area][variable][idx]
objective += alpha/2 * (v - v_central)^2 + v_dual * (v - v_central)
end
end
end
return objective
end
"update the adaptive ADMM algorithm data after each iteration"
function update_method(data::Dict{String, <:Any})
# parameters
alphas = data["alpha"]
alpha_max = data["parameter"]["alpha_max"]
alpha_min = data["parameter"]["alpha_min"]
mu_inc = data["parameter"]["mu_inc"]
mu_dec = data["parameter"]["mu_dec"]
eta_inc = data["parameter"]["eta_inc"]
eta_dec = data["parameter"]["eta_dec"]
# data
shared_variable_local = data["shared_variable"]
shared_variable_received = data["received_variable"]
dual_variable = deepcopy(data["dual_variable"])
# update dual variable
for area in keys(dual_variable)
for variable in keys(dual_variable[area])
for idx in keys(dual_variable[area][variable])
v_primal = shared_variable_local[area][variable][idx]
v_central = (shared_variable_local[area][variable][idx] + shared_variable_received[area][variable][idx])/2
v_dual = dual_variable[area][variable][idx]
alpha = alphas[area][variable][idx]
data["dual_variable"][area][variable][idx]= v_dual + alpha * (v_primal - v_central)
end
end
end
calc_dual_residual_adaptive!(data)
calc_mismatch!(data, central = true)
## update adaptive ADMM parameters
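    # residual balancing: increase alpha where the primal mismatch dominates the
    # dual residual (to push harder toward consensus), decrease it where the dual
    # residual dominates, and clamp the result to [alpha_min, alpha_max]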
for area in keys(data["alpha"])
for variable in keys(data["alpha"][area])
for idx in keys(data["alpha"][area][variable])
if data["mismatch"][area][variable][idx] > mu_inc * data["dual_residual"][area][variable][idx]
data["alpha"][area][variable][idx] = data["alpha"][area][variable][idx] * ( 1 + eta_inc)
elseif data["dual_residual"][area][variable][idx] > mu_dec * data["mismatch"][area][variable][idx]
data["alpha"][area][variable][idx] = data["alpha"][area][variable][idx] / ( 1 + eta_dec)
end
if data["alpha"][area][variable][idx] > alpha_max
data["alpha"][area][variable][idx] = alpha_max
elseif data["alpha"][area][variable][idx] < alpha_min
data["alpha"][area][variable][idx] = alpha_min
end
end
end
end
update_flag_convergence!(data)
save_solution!(data)
update_iteration!(data)
end
"""
    calc_dual_residual_adaptive!(data::Dict{String, <:Any}; central::Bool=false)
calculate the dual residual as seen by the area, using the per-element alpha values. Set central=true if the algorithm uses the optimality condition of a central coordinator.
"""
function calc_dual_residual_adaptive!(data::Dict{String, <:Any}; central::Bool=false)
area_id = string(get_area_id(data))
alpha = data["alpha"]
shared_variable_local = data["shared_variable"]
shared_variable_received = data["received_variable"]
if data["counter"]["iteration"] == 1
dual_dual_residual = Dict{String, Any}([
area => Dict{String, Any}([
variable => Dict{String, Any}([
idx => central ? -alpha[area][variable][idx]* (shared_variable_local[area][variable][idx]+shared_variable_received[area][variable][idx])/2 : -alpha[area][variable][idx]* shared_variable_local[area][variable][idx]
for idx in keys(shared_variable_local[area][variable])])
for variable in keys(shared_variable_local[area])])
for area in keys(shared_variable_local)])
else
previous_shared_variable_local = data["previous_solution"]["shared_variable"][end]
previous_shared_variable_received = data["previous_solution"]["received_variable"][end]
dual_dual_residual = Dict{String, Any}([
area => Dict{String, Any}([
variable => Dict{String, Any}([
idx => central ? -alpha[area][variable][idx] * ((shared_variable_local[area][variable][idx]+shared_variable_received[area][variable][idx])/2 - (previous_shared_variable_local[area][variable][idx] +previous_shared_variable_received[area][variable][idx] )/2) : -alpha[area][variable][idx] * (shared_variable_local[area][variable][idx] - previous_shared_variable_local[area][variable][idx])
for idx in keys(shared_variable_local[area][variable])])
for variable in keys(shared_variable_local[area])])
for area in keys(shared_variable_local) ])
end
dual_dual_residual[area_id] = LinearAlgebra.norm([value for area in keys(dual_dual_residual) if area != area_id for variable in keys(dual_dual_residual[area]) for (idx,value) in dual_dual_residual[area][variable]])
data["dual_residual"] = dual_dual_residual
end
post_processors = [update_solution!, update_shared_variable!]
push!(_pmada_global_keys, "alpha", "shared_variable", "received_variable", "dual_variable", "dual_residual")
end
"""
solve_dopf_adaptive_admm(data::Dict{String, <:Any}, model_type::DataType, optimizer;
mismatch_method::String="norm", tol::Float64=1e-4, max_iteration::Int64=1000,
print_level::Int64=1, print_optimizer_info::Bool=false, alpha::Real=1000)
Solve the distributed OPF problem using the adaptive ADMM algorithm.
# Arguments:
- data::Dict{String, <:Any} : dictionary containing the case in PowerModels format
- model_type::DataType : power flow formulation (PowerModel type)
- optimizer : JuMP optimizer initialization object
- mismatch_method::String="norm" : mismatch calculation method (norm, max)
- tol::Float64=1e-4 : mismatch tolerance
- max_iteration::Int64=1000 : maximum number of iterations
- print_level::Int64=1 : 0 - no print, 1 - print mismatch after each iteration and result summary, 2 - print optimizer output
- alpha::Real=1000 : initial value of the algorithm parameter
"""
solve_dopf_adaptive_admm = adaptive_admm_methods.solve_method
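# A usage sketch mirroring the example script earlier in this collection
# (`optimizer` is a JuMP optimizer object such as Ipopt; the mu/eta keywords
# tune the residual-balancing update of alpha):
#
# data_area = solve_dopf_adaptive_admm(data, ACPPowerModel, optimizer,
#     tol=1e-3, max_iteration=1000, alpha=100.0, mu_inc=1.05, mu_dec=1.05,
#     eta_inc=0.05, eta_dec=0.02)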
# export the algorithm methods module and solve method
export adaptive_admm_methods, solve_dopf_adaptive_admm | PowerModelsADA | https://github.com/mkhraijah/PowerModelsADA.jl.git |
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | code | 7811 | ###############################################################################
# Build methods for ADMM algorithm with coordinator #
###############################################################################
"""
ADMM with coordinator algorithm module containing build and update methods
"""
module admm_coordinated_methods
using ..PowerModelsADA
"solve distributed OPF using ADMM algorithm with central coordinator"
function solve_method(data, model_type::DataType, optimizer; kwargs...)
solve_dopf_coordinated(data, model_type, optimizer, admm_coordinated_methods; kwargs...)
end
"initialize the ADMM algorithm local area"
function initialize_method_local(data::Dict{String, <:Any}, model_type::DataType; kwargs...)
area_id = get_area_id(data)
initialization_method = get(kwargs, :initialization_method, "flat")
# primal and dual shared variables
data["shared_variable"] = initialize_shared_variable(data, model_type, area_id, 0, "shared_variable", initialization_method)
data["received_variable"] = initialize_shared_variable(data, model_type, area_id, 0, "received_variable", initialization_method)
data["dual_variable"] = initialize_shared_variable(data, model_type, area_id , 0, "dual_variable", initialization_method)
# distributed algorithm settings
initialize_dopf!(data, model_type; kwargs...)
# initialize ADMM parameters
data["parameter"] = Dict("alpha"=> Float64(get(kwargs, :alpha, 1000)))
# ADMM dual residual dictionary and tolerance
if data["option"]["termination_measure"] in ["dual_residual", "mismatch_dual_residual"]
if haskey(data, "previous_solution")
for str in ["shared_variable", "received_variable"]
if !haskey(data["previous_solution"], str)
data["previous_solution"][str]= Vector{Dict}()
end
end
else
data["previous_solution"]= Dict([str=> Vector{Dict}() for str in ["shared_variable", "received_variable"]])
end
data["option"]["tol_dual"] = get(kwargs, :tol_dual, data["option"]["tol"])
end
end
"initializethe ADMM algorithm coordinator"
function initialize_method_coordinator(data::Dict{String, <:Any}, model_type::DataType; kwargs...)
area_id = get_area_id(data)
areas_id = get_areas_id(data)
initialization_method = get(kwargs, :initialization_method, "flat")
# initialize primal and dual shared variables
data["shared_variable"] = initialize_shared_variable(data, model_type, area_id, areas_id, "shared_variable", initialization_method)
data["received_variable"] = initialize_shared_variable(data, model_type, area_id, areas_id, "received_variable", initialization_method)
data["dual_variable"] = initialize_shared_variable(data, model_type, area_id, areas_id, "dual_variable", initialization_method)
# distributed algorithm settings
initialize_dopf!(data, model_type; kwargs...)
# initialize ADMM parameters
data["parameter"] = Dict("alpha"=> get(kwargs, :alpha, 1000))
if data["option"]["termination_measure"] in ["dual_residual", "mismatch_dual_residual"]
if haskey(data, "previous_solution")
for str in ["shared_variable", "received_variable"]
if !haskey(data["previous_solution"], str)
data["previous_solution"][str]= Vector{Dict}()
end
end
else
data["previous_solution"]= Dict([str=> Vector{Dict}() for str in ["shared_variable", "received_variable"]])
end
end
# ADMM dual residual tolerance
if data["option"]["termination_measure"] in ["dual_residual", "mismatch_dual_residual"]
data["option"]["tol_dual"] = get(kwargs, :tol_dual, data["option"]["tol"])
end
end
"build PowerModel object for the ADMM algorithm local area"
function build_method_local(pm::AbstractPowerModel)
# define variables
variable_opf(pm)
# define constraints
constraint_opf(pm)
# define objective function
objective_min_fuel_and_consensus!(pm, objective_admm_local)
end
"build PowerModel object for the ADMM algorithm coordinator"
function build_method_coordinator(pm::AbstractPowerModel)
# define variables
variable_opf(pm)
# define objective function
objective_min_fuel_and_consensus!(pm, objective_admm_coordinator)
end
"ADMM algorithm objective function of the coordinator"
function objective_admm_local(pm::AbstractPowerModel)
# parameters
alpha = pm.data["parameter"]["alpha"]
# data
shared_variable_received = pm.data["received_variable"]
dual_variable = pm.data["dual_variable"]
# objective function
objective = 0
for area in keys(dual_variable)
for variable in keys(dual_variable[area])
for idx in keys(dual_variable[area][variable])
v = PowerModelsADA._var(pm, variable, idx)
v_central = shared_variable_received[area][variable][idx]
v_dual = dual_variable[area][variable][idx]
objective += alpha/2 * (v - v_central)^2 + v_dual * (v - v_central)
end
end
end
return objective
end
"ADMM algorithm objective function of the local area"
objective_admm_coordinator(pm::AbstractPowerModel) = objective_admm_local(pm)
"update the ADMM algorithm coordinator data after each iteration"
function update_method_local(data::Dict{String, <:Any})
# parameters
alpha = data["parameter"]["alpha"]
# data
shared_variable_local = data["shared_variable"]
shared_variable_received = data["received_variable"]
dual_variable = data["dual_variable"]
# update dual variable
for area in keys(dual_variable)
for variable in keys(dual_variable[area])
for idx in keys(dual_variable[area][variable])
v_local = shared_variable_local[area][variable][idx]
v_central = shared_variable_received[area][variable][idx]
v_dual = dual_variable[area][variable][idx]
data["dual_variable"][area][variable][idx]= v_dual + alpha * (v_local - v_central)
end
end
end
calc_mismatch!(data)
if data["option"]["termination_measure"] in ["dual_residual", "mismatch_dual_residual"]
calc_dual_residual!(data)
end
update_flag_convergence!(data)
save_solution!(data)
update_iteration!(data)
end
"update the ADMM algorithm coordinator data after each iteration"
update_method_coordinator(data::Dict{String, <:Any}) = update_method_local(data)
post_processors_local = [update_solution!, update_shared_variable!]
post_processors_coordinator = [update_solution!, update_shared_variable!]
push!(_pmada_global_keys, "shared_variable", "received_variable", "dual_variable")
end
"""
    solve_dopf_admm_coordinated(data::Dict{String, <:Any}, model_type::DataType, optimizer;
    mismatch_method::String="norm", tol::Float64=1e-4, max_iteration::Int64=1000,
    print_level::Int64=1, alpha::Real=1000)
Solve the distributed OPF problem using the ADMM algorithm with central coordinator.
# Arguments:
- data::Dict{String, <:Any} : dictionary containing the case in PowerModels format
- model_type::DataType : power flow formulation (PowerModel type)
- optimizer : JuMP optimizer initialization object
- mismatch_method::String="norm" : mismatch calculation method (norm, max)
- tol::Float64=1e-4 : mismatch tolerance
- max_iteration::Int64=1000 : maximum number of iterations
- print_level::Int64=1 : 0 - no print, 1 - print mismatch after each iteration and result summary, 2 - print optimizer output
- alpha::Real=1000 : algorithm parameter
"""
solve_dopf_admm_coordinated = admm_coordinated_methods.solve_method
# export the algorithm methods module and solve method
export admm_coordinated_methods, solve_dopf_admm_coordinated | PowerModelsADA | https://github.com/mkhraijah/PowerModelsADA.jl.git |
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | code | 5789 | ###############################################################################
# Build methods for ADMM algorithm #
###############################################################################
"""
ADMM algorithm module containing build and update methods
"""
module admm_methods
using ..PowerModelsADA
"solve distributed OPF using ADMM algorithm"
function solve_method(data, model_type::Type, optimizer; kwargs...)
solve_dopf(data, model_type, optimizer, admm_methods; kwargs...)
end
"initialize the ADMM algorithm"
function initialize_method(data::Dict{String, <:Any}, model_type::Type; kwargs...)
area_id = get_area_id(data)
areas_id = get_areas_id(data)
deleteat!(areas_id, areas_id .== area_id) # remove the same area from the list of areas_id
initialization_method = get(kwargs, :initialization_method, "flat")
# primal and dual shared variables
data["shared_variable"] = initialize_shared_variable(data, model_type, area_id, areas_id, "shared_variable", initialization_method)
data["received_variable"] = initialize_shared_variable(data, model_type, area_id, areas_id, "received_variable", initialization_method)
data["dual_variable"] = initialize_shared_variable(data, model_type, area_id, areas_id, "dual_variable", initialization_method)
data["dual_residual"] = Dict{String, Any}()
# distributed algorithm settings
initialize_dopf!(data, model_type; kwargs...)
# ADMM parameters
data["parameter"] = Dict("alpha"=> Float64(get(kwargs, :alpha, 1000)))
# ADMM dual residual dictionary and tolerance
if data["option"]["termination_measure"] in ["dual_residual", "mismatch_dual_residual"]
if haskey(data, "previous_solution")
for str in ["shared_variable", "received_variable"]
if !haskey(data["previous_solution"], str)
data["previous_solution"][str]= Vector{Dict}()
end
end
else
data["previous_solution"] = Dict([str=> Vector{Dict}() for str in ["shared_variable", "received_variable"]])
end
data["option"]["tol_dual"] = get(kwargs, :tol_dual, data["option"]["tol"])
end
end
"build PowerModel object for the ADMM algorithm"
function build_method(pm::AbstractPowerModel)
# define variables
variable_opf(pm)
# define constraints
constraint_opf(pm)
# define objective function
objective_min_fuel_and_consensus!(pm, objective_admm)
end
"ADMM algorithm objective function"
function objective_admm(pm::AbstractPowerModel)
# parameters
alpha = pm.data["parameter"]["alpha"]
# data
shared_variable_local = pm.data["shared_variable"]
shared_variable_received = pm.data["received_variable"]
dual_variable = pm.data["dual_variable"]
##objective function
objective = 0
for area in keys(dual_variable)
for variable in keys(dual_variable[area])
for idx in keys(dual_variable[area][variable])
v = PowerModelsADA._var(pm, variable, idx)
v_central = (shared_variable_local[area][variable][idx] + shared_variable_received[area][variable][idx])/2
v_dual = dual_variable[area][variable][idx]
objective += alpha/2 * (v - v_central)^2 + v_dual * (v - v_central)
end
end
end
return objective
end
"update the ADMM algorithm data after each iteration"
function update_method(data::Dict{String, <:Any})
# parameters
alpha = data["parameter"]["alpha"]
# data
shared_variable_local = data["shared_variable"]
shared_variable_received = data["received_variable"]
dual_variable = data["dual_variable"]
# update dual variable
for area in keys(dual_variable)
for variable in keys(dual_variable[area])
for idx in keys(dual_variable[area][variable])
v_primal = shared_variable_local[area][variable][idx]
v_central = (shared_variable_local[area][variable][idx] + shared_variable_received[area][variable][idx])/2
v_dual = dual_variable[area][variable][idx]
data["dual_variable"][area][variable][idx]= v_dual + alpha * (v_primal - v_central)
end
end
end
calc_mismatch!(data, central=true)
if data["option"]["termination_measure"] in ["dual_residual", "mismatch_dual_residual"]
calc_dual_residual!(data, central=true)
end
update_flag_convergence!(data)
save_solution!(data)
update_iteration!(data)
end
post_processors = [update_solution!, update_shared_variable!]
push!(_pmada_global_keys, "shared_variable", "received_variable", "dual_variable", "dual_residual")
end
"""
solve_dopf_admm(data::Dict{String, <:Any}, model_type::DataType, optimizer;
mismatch_method::String="norm", tol::Float64=1e-4, max_iteration::Int64=1000,
print_level::Int64=1, print_optimizer_info::Bool=false, alpha::Real=1000)
Solve the distributed OPF problem using the ADMM algorithm.
# Arguments:
- data::Dict{String, <:Any} : dictionary containing the case in PowerModels format
- model_type::DataType : power flow formulation (PowerModel type)
- optimizer : JuMP optimizer initialization object
- mismatch_method::String="norm" : mismatch calculation method (norm, max)
- tol::Float64=1e-4 : mismatch tolerance
- tol_dual::Float64=1e-4 : dual residual tolerance
- max_iteration::Int64=1000 : maximum number of iterations
- print_level::Int64=1 : 0 - no print, 1 - print mismatch after each iteration and result summary, 2 - print optimizer output
- alpha::Real=1000 : algorithm parameter
"""
solve_dopf_admm = admm_methods.solve_method
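# A usage sketch mirroring the example script earlier in this collection
# (`optimizer` is a JuMP optimizer object such as Ipopt):
#
# data_area = solve_dopf_admm(data, ACPPowerModel, optimizer,
#     tol=1e-3, max_iteration=1000, print_level=1, alpha=1000)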
# export the algorithm methods module and solve method
export admm_methods, solve_dopf_admm | PowerModelsADA | https://github.com/mkhraijah/PowerModelsADA.jl.git |
|
[
"MIT"
] | 0.1.4 | 1b4ce3c83cc3946f34782ffbeb2248009e260cba | code | 26736 | ###############################################################################
# Build methods for ALADIN algorithm with coordinator #
###############################################################################
# """
# ALADIN algorithm module contains build and update methods
# """
module aladin_coordinated_methods
using JuMP
using PowerModels
using SparseArrays
using LinearAlgebra
using ..PowerModelsADA
"initialize the ALADIN algorithm local area"
function initialize_method_local(data::Dict{String, <:Any}, model_type::DataType; kwargs...)
area_id = get_area_id(data)
areas_id = get_areas_id(data)
deleteat!(areas_id, areas_id .== area_id) # remove the same area from the list of areas_id
# initialize primal and dual shared variables
data["shared_variable"] = initialize_shared_variable(data, model_type, area_id, 0, "shared_variable", "flat")
data["shared_dual_variable"] = Dict{String, Dict{String, Any}}("0" => initialize_shared_variable(data, model_type, area_id, areas_id, "shared_dual_variable", "flat"))
data["received_dual_variable"] = Dict{String, Dict{String, Any}}("0" => initialize_shared_variable(data, model_type, area_id, areas_id, "received_dual_variable", "flat"))
data["shared_sensitivities"] = Dict{String, Dict{String, Any}}("0" => Dict("g"=>Dict{String, Any}(), "C"=>Dict{String, Any}(), "B"=> Dict{String, Any}() ))
data["local_solution"] = initialize_all_variable(data, model_type)
data["received_delta"] = Dict{String, Dict{String, Any}}("0" => initialize_all_variable(data, model_type, "zeros"))
# initialize algorithm settings
initialize_dopf!(data, model_type; kwargs...)
# initialize ALADIN parameters
p = Float64(get(kwargs, :p, 1000))
r_p = Float64(get(kwargs, :r_p, 1.3))
p_upper = Float64(get(kwargs, :p_upper, 1e6))
a1 = Float64(get(kwargs, :a1, 1))
a2 = Float64(get(kwargs, :a2, 1))
a3 = Float64(get(kwargs, :a3, 1))
q_gamma = get(kwargs, :q_gamma, 0)
sigma = get(kwargs, :sigma, NaN)
data["parameter"] = Dict("p" => p, "r_p"=> r_p, "p_upper"=>p_upper, "a1"=>a1, "a2"=>a2, "a3"=> a3, "q_gamma"=>q_gamma, "sigma"=>sigma)
end
"initialize the ALADIN algorithm coordinator"
function initialize_method_coordinator(data::Dict{String, <:Any}, model_type::DataType; kwargs...)
data_system = data
data = deepcopy(data_system)
data = decompose_coordinator(data)
areas_id = get_areas_id(data)
# initialize primal and dual shared variables
data["received_sensitivities"] = Dict{String,Any}([string(area) => Dict{String, Any}() for area in areas_id])
data["received_variable"] = initialize_shared_variable(data, model_type, 0 ,areas_id, "received_variable", "flat")
data["shared_dual_variable"] = Dict{String,Any}()
data["received_dual_variable"] = Dict{String,Any}()
data["shared_delta"] = Dict{String,Any}()
for i in areas_id
data_area = decompose_system(data_system, i)
areas = deepcopy(areas_id)
deleteat!(areas, areas .== i)
data["shared_dual_variable"][string(i)] = initialize_shared_variable(data, model_type, i ,areas, "shared_dual_variable", "flat")
data["received_dual_variable"][string(i)] = initialize_shared_variable(data, model_type, i ,areas, "received_dual_variable", "flat")
data["shared_delta"][string(i)] = initialize_all_variable(data_area, model_type, "shared_delta")
end
# initialize distributed algorithm parameters
initialize_dopf!(data, model_type; kwargs...)
mu = Float64(get(kwargs, :mu, 1000))
r_mu = Float64(get(kwargs, :r_mu, 2))
mu_upper = Float64(get(kwargs, :mu_upper, 2e6))
a1 = Float64(get(kwargs, :a1, 1))
a2 = Float64(get(kwargs, :a2, 1))
a3 = Float64(get(kwargs, :a3, 1))
q_gamma = Float64(get(kwargs, :q_gamma, 0))
sigma = get(kwargs, :sigma, NaN)
data["parameter"] = Dict("mu" => mu, "r_mu" => r_mu, "mu_upper" => mu_upper, "a1" => a1, "a2" => a2, "a3" => a3, "q_gamma" => q_gamma, "sigma" => sigma)
return data
end
function calc_mismatch_aladin!(data::Dict{String, <:Any}; p::Int64=2 )
area_id = string(get_area_id(data))
mismatch_method = data["option"]["mismatch_method"]
shared_variable_local = data["shared_variable"]["0"]
shared_variable_solution = data["local_solution"]
mismatch = Dict{String, Any}([
variable => Dict{String, Any}([
idx => shared_variable_local[variable][idx] - shared_variable_solution[variable][idx]
for idx in keys(shared_variable_local[variable])])
for variable in keys(shared_variable_local)])
if mismatch_method == "norm"
mismatch[area_id] = LinearAlgebra.norm([value for variable in keys(mismatch) for (idx,value) in mismatch[variable]], p)
elseif mismatch_method == "max" || mismatch_method == "maximum"
mismatch[area_id] = LinearAlgebra.maximum([value for variable in keys(mismatch) for (idx,value) in mismatch[variable]])
end
data["mismatch"] = mismatch
end
"update the ALADIN algorithm coordinator data after each iteration"
function update_method_local(data::Dict{String, <:Any})
# parameters
p = data["parameter"]["p"]
r_p = data["parameter"]["r_p"]
p_upper = data["parameter"]["p_upper"]
a1 = data["parameter"]["a1"]
a2 = data["parameter"]["a2"]
a3 = data["parameter"]["a3"]
## data
dual_variable_local = data["shared_dual_variable"]["0"]
dual_variable_central = data["received_dual_variable"]["0"]
delta = data["received_delta"]["0"]
solution = data["solution"]
local_solution = data["local_solution"]
## update dual variable
for area in keys(dual_variable_local)
for variable in keys(dual_variable_local[area])
for idx in keys(dual_variable_local[area][variable])
dual_central = dual_variable_central[area][variable][idx]
dual_local = dual_variable_local[area][variable][idx]
dual_variable_local[area][variable][idx]= dual_local + a3 * (dual_central - dual_local)
end
end
end
## update solution (corresponding to update all variables)
for variable in keys(local_solution)
for idx in keys(local_solution[variable])
v_sol = solution[variable][idx]
v_local = local_solution[variable][idx]
v_delta = delta[variable][idx]
local_solution[variable][idx] = v_local + a1 * (v_sol - v_local) + a2 * v_delta
end
end
# update parameters
if p < p_upper
data["parameter"]["p"] = r_p * p
end
calc_mismatch_aladin!(data)
update_flag_convergence!(data)
save_solution!(data)
update_iteration!(data)
end
"build PowerModel object for the ALADIN algorithm local area"
function build_method_local(pm::AbstractPowerModel)
# define variables
variable_opf(pm)
# define constraints
constraint_opf(pm)
# define objective function
objective_min_fuel_and_consensus!(pm, objective_aladin_local)
end
"ALADIN algorithm objective function of the coordinator"
function objective_aladin_local(pm::AbstractPowerModel)
## ALADIN parameters
p = pm.data["parameter"]["p"]
q_gamma = pm.data["parameter"]["q_gamma"]
sigma = pm.data["parameter"]["sigma"]
## data
area_id = get_area_id(pm)
dual_variable = pm.data["shared_dual_variable"]["0"]
local_solution = pm.data["local_solution"]
## objective function
if haskey(pm.data["solution"], "qg")
objective = q_gamma*(sum(PowerModelsADA._var(pm, "qg", string(idx)) for idx in ids(pm, :gen)))^2
else
objective = 0
end
for area in keys(dual_variable)
for variable in keys(dual_variable[area])
for idx in keys(dual_variable[area][variable])
v = PowerModelsADA._var(pm, variable, idx)
v_dual = dual_variable[area][variable][idx]
if area_id < parse(Int64,area)
objective += v * v_dual
else
objective -= v * v_dual
end
end
end
end
for variable in keys(local_solution)
for idx in keys(local_solution[variable])
v = PowerModelsADA._var(pm, variable, idx)
v_local = local_solution[variable][idx]
objective += sigma[variable] * p / 2 * (v - v_local)^2
end
end
return objective
end
function update_sensitivities!(pm::AbstractPowerModel, solution::Dict{String, <:Any})
solution["shared_sensitivities"] = pm.data["shared_sensitivities"]
solution["shared_sensitivities"]["0"]["g"] = compute_cost_gradient(pm)
solution["shared_sensitivities"]["0"]["C"] = compute_active_constraint_jacobian(pm, true)
solution["shared_sensitivities"]["0"]["B"] = compute_optimal_hessian(pm)
end
post_processors_local = [update_solution!, update_shared_variable!, update_sensitivities!]
function compute_cost_gradient(pm::AbstractPowerModel)
g = initialize_all_variable(pm.data, typeof(pm),"zeros")
for (i,gen) in pm.data["gen"]
pg = PowerModelsADA._var(pm, "pg", i)
g["pg"][i] = 2*gen["cost"][1]*value(pg) + gen["cost"][2]
end
return g
end
function compute_active_constraint_jacobian(pm::AbstractPowerModel, delete_dependent_constraints::Bool)
# obtain the Jacobian matrix
C_matrix = active_constraint_jacobian(pm, true)
# arrange Jacobian matrix for each variable
variable_dict = initialize_all_variable(pm.data, typeof(pm),"zeros")
C = Dict([variable => Dict([idx=> Vector() for idx in keys(variable_dict[variable])]) for variable in keys(variable_dict)])
for variable in keys(C)
for idx in keys(C[variable])
v = PowerModelsADA._var(pm, variable, idx)
col = JuMP.index(v).value
C[variable][idx] = C_matrix[:,col]
end
end
return C
end
function active_constraint_jacobian(pm::AbstractPowerModel, delete_dependent_constraints::Bool)
model = pm.model
x = JuMP.all_variables(model)
x_optimal = JuMP.value.(x)
C = zeros(0,length(x))
if !(isempty(JuMP.all_nonlinear_constraints(model)))
d = JuMP.NLPEvaluator(model)
JuMP.MOI.initialize(d, [:Jac])
jacobian_sparsity = JuMP.MOI.jacobian_structure(d)
I = [i for (i, _) in jacobian_sparsity]
J = [j for (_, j) in jacobian_sparsity]
V = zeros(length(jacobian_sparsity))
nc = JuMP.num_nonlinear_constraints(model)
JuMP.MOI.eval_constraint_jacobian(d, V, x_optimal)
C = SparseArrays.sparse(I, J, V, nc, length(x))
end
#Now add in gradients for linear and quadratic constraints, which were not included in NLPEvaluator
cref_types = JuMP.list_of_constraint_types(model)
for t in cref_types
if t[1] == JuMP.AffExpr #Add gradients of all active linear constraints
crefs = JuMP.all_constraints(model, t[1], t[2])
for cref in crefs
if abs(JuMP.dual(cref)) > 1e-3 || t[2] == JuMP.MOI.EqualTo{Float64} #only add gradient of active constraints
cgrad = zeros(1, length(x))
for k = 1:length(x)
cgrad[1,k] = JuMP.normalized_coefficient(cref, x[k])
end
C = vcat(C, cgrad)
end
end
elseif t[1] == QuadExpr #Add gradients of all active quadratic constraints
vmap = Dict(x[i] => i for i in 1:length(x))
crefs = JuMP.all_constraints(model, t[1], t[2])
for cref in crefs
if abs(dual(cref)) > 1e-3 || t[2] == JuMP.MOI.EqualTo{Float64}
cgrad = zeros(1,length(x))
func_set = JuMP.constraint_object(cref)
for (vars, coef) in func_set.func.terms
if vars.a != vars.b
cgrad[1,vmap[vars.a]] += x_optimal[vmap[vars.b]]*coef
cgrad[1,vmap[vars.b]] += x_optimal[vmap[vars.a]]*coef
else
cgrad[1,vmap[vars.a]] += 2*x_optimal[vmap[vars.a]]*coef
end
end
for k = 1:length(x)
cgrad[1,k] += JuMP.coefficient(func_set.func, x[k]) #add coefficient of affine terms
end
C = vcat(C, cgrad)
end
end
end
end
#Add gradient of active variable bound constraints
for k = 1:length(x)
if JuMP.has_lower_bound(x[k])
if abs(JuMP.value(x[k]) - JuMP.lower_bound(x[k])) < 1e-6
cgrad = zeros(1, length(x))
cgrad[1,k] = 1
C = vcat(C, cgrad)
end
end
if JuMP.has_upper_bound(x[k])
if abs(JuMP.value(x[k]) - JuMP.upper_bound(x[k])) < 1e-6
cgrad = zeros(1, length(x))
cgrad[1,k] = 1
C = vcat(C, cgrad)
end
end
if JuMP.is_fixed(x[k])
cgrad = zeros(1, length(x))
cgrad[1,k] = 1
C = vcat(C, cgrad)
end
end
# Delete linearly dependent rows until constraints are all linearly independent (Ipopt doesn't do this, although Gurobi would)
if (delete_dependent_constraints)
C = delete_dependent_row(C)
end
return Matrix(C)
end
function delete_dependent_row(mat)
while(size(mat)[1] > LinearAlgebra.rank(mat))
x = findfirst(i -> rank(mat[1:end .!= i, :]) == rank(mat), 1:size(mat)[1])
mat =mat[1:end .!= x, :]
end
return mat
end
function compute_optimal_hessian(pm::AbstractPowerModel)
# obtain the Hessian matrix
B_matrix = optimal_hessian(pm)
# arrange Hessian matrix for each variable
variable_dict = initialize_all_variable(pm.data, typeof(pm),"zeros")
B = Dict([variable1 => Dict([idx1=> Dict(
[variable2 => Dict([idx2=> 0.0 for idx2 in keys(variable_dict[variable2])]) for variable2 in keys(variable_dict)]
) for idx1 in keys(variable_dict[variable1])]) for variable1 in keys(variable_dict)])
for variable1 in keys(B)
for idx1 in keys(B[variable1])
v1 = PowerModelsADA._var(pm, variable1, idx1)
for variable2 in keys(B)
for idx2 in keys(B[variable2])
v2 = PowerModelsADA._var(pm, variable2, idx2)
row = JuMP.index(v1).value
col = JuMP.index(v2).value
B[variable1][idx1][variable2][idx2] = B_matrix[row,col]
end
end
end
end
return B
end
function optimal_hessian(pm::AbstractPowerModel)
pm_temp = deepcopy(pm)
model = pm_temp.model
x = all_variables(model)
n = num_variables(model)
x_optimal = value.(x)
y_optimal = -dual.(all_nonlinear_constraints(model))
crefs = Dict{ConstraintRef,Float64}()
for (F, S) in list_of_constraint_types(model)
for cref in all_constraints(model, F, S)
crefs[cref] = -dual(cref)
end
end
#change the objective to generator cost only
PowerModels.objective_min_fuel_and_flow_cost(pm_temp)
# compute hessian matrix
H = spzeros(length(x),length(x))
if !(isempty(all_nonlinear_constraints(model)))
d = NLPEvaluator(model)
MOI.initialize(d, [:Hess])
hessian_sparsity = MOI.hessian_lagrangian_structure(d)
I = [i for (i, _) in hessian_sparsity]
J = [j for (_, j) in hessian_sparsity]
V = zeros(length(hessian_sparsity))
MOI.eval_hessian_lagrangian(d, V, x_optimal, 1.0, y_optimal)
H = SparseArrays.sparse(I, J, V, n, n)
end
vmap = Dict(x[i] => i for i in 1:n)
for (F, S) in list_of_constraint_types(model)
for cref in all_constraints(model, F, S)
add_to_hessian(H, constraint_object(cref).func, crefs[cref], vmap)
end
end
add_to_hessian(H, objective_function(model), 1.0, vmap)
B = fill_off_diagonal(H)
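    # regularize the Hessian: if B is not sufficiently positive definite, rebuild it
    # from its eigendecomposition with negative eigenvalues flipped to positive and
    # near-zero eigenvalues floored at 1e-4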
if findmin(eigvals(Matrix(B)))[1] < 1e-4
F = eigen(Matrix(B));
d = zeros(length(F.values));
for idx = 1:length(F.values)
if F.values[idx] < -1e-4
d[idx] = abs(F.values[idx]) # flip negative eigenvalues to positive
elseif -1e-4 <= F.values[idx] <= 1e-4
d[idx] = 1e-4
else
d[idx] = F.values[idx]
end
end
D = LinearAlgebra.Diagonal(d)
B = F.vectors*abs.(D)*transpose(F.vectors)
end
return B
end
add_to_hessian(H, f::Any, μ, vmap) = nothing
function add_to_hessian(H, f::QuadExpr, μ, vmap)
for (vars, coef) in f.terms
if vars.a != vars.b
H[vmap[vars.a], vmap[vars.b]] += μ * coef
else
H[vmap[vars.a], vmap[vars.b]] += 2 * μ * coef
end
end
end
#helper function to fill in missing symmetric elements in sparse Hessian
function fill_off_diagonal(H)
ret = H + H'
row_vals = SparseArrays.rowvals(ret)
non_zeros = SparseArrays.nonzeros(ret)
for col in 1:size(ret, 2)
for i in SparseArrays.nzrange(ret, col)
if col == row_vals[i]
non_zeros[i] /= 2
end
end
end
return ret
end
"solve the ALADIN algorithm coordinator problem"
function solve_coordinator!(data, optimizer)
mu = data["parameter"]["mu"]
shared_variable = data["received_variable"]
dual_variable = data["received_dual_variable"]
sensitivities = data["received_sensitivities"]
delta = data["shared_delta"]
qp_dual_variable = data["shared_dual_variable"]
model = Model(optimizer)
# define variables
x = Dict{String, Any}(area => Dict{String, Any}(variable => Dict{String, Any}(idx => JuMP.@variable(model, base_name=string("x_", area, "_", variable, "_", idx)) for idx in keys(delta[area][variable])) for variable in keys(delta[area])) for area in keys(delta))
s = Dict{String, Any}(area1 => Dict{String, Any}(area2=> Dict{String, Any}(variable => Dict{String, Any}(idx => JuMP.@variable(model, base_name=string("s_", variable, "_", idx)) for idx in keys(dual_variable[area1][area2][variable])) for variable in keys(dual_variable[area1][area2])) for area2 in keys(dual_variable[area1]) if area1<area2) for area1 in keys(dual_variable))
# define objective function
qp_objective = JuMP.GenericQuadExpr{Float64, JuMP.VariableRef}()
for area1 in keys(dual_variable)
for area2 in keys(dual_variable[area1])
if area1 < area2
for variable in keys(dual_variable[area1][area2])
for (idx,val) in dual_variable[area1][area2][variable]
qp_objective += val*s[area1][area2][variable][idx] + mu/2*(s[area1][area2][variable][idx])^2
end
end
end
end
end
for area in keys(x)
for variable in keys(x[area])
for idx in keys(x[area][variable])
                qp_objective += sum(
                    0.5 * x[area][variable][idx] * x[area][variable2][idx2] *
                    sensitivities[area]["B"][variable][idx][variable2][idx2]
                    for variable2 in keys(x[area]) for idx2 in keys(x[area][variable2])) +
                    sensitivities[area]["g"][variable][idx] * x[area][variable][idx]
end
end
end
@objective(model, Min, qp_objective)
# define constraints
    constraint_ref = Dict{String, Any}(
        area1 => Dict{String, Any}(
            area2 => Dict{String, Any}(
                variable => Dict{String, JuMP.ConstraintRef}(
                    idx => @constraint(model,
                        x[area1][variable][idx] + shared_variable[area1][variable][idx] -
                        x[area2][variable][idx] - shared_variable[area2][variable][idx] ==
                        s[area1][area2][variable][idx])
                    for idx in keys(dual_variable[area1][area2][variable]))
                for variable in keys(dual_variable[area1][area2]))
            for area2 in keys(dual_variable[area1]) if area1 < area2)
        for area1 in keys(dual_variable))
for area in keys(x)
        # number of active-constraint rows in this area's Jacobian C
        n_constraints = size(first(first(sensitivities[area]["C"])[2])[2])[1]
for n in 1:n_constraints
@constraint(model, sum(sensitivities[area]["C"][variable][idx][n]*x[area][variable][idx] for variable in keys(x[area]) for idx in keys(x[area][variable])) == 0)
end
end
optimize!(model)
for area in keys(delta)
for variable in keys(delta[area])
for idx in keys(delta[area][variable])
delta[area][variable][idx] = value(x[area][variable][idx])
end
end
end
for area1 in keys(qp_dual_variable)
for area2 in keys(qp_dual_variable[area1])
if area1 < area2
for variable in keys(qp_dual_variable[area1][area2])
for idx in keys(qp_dual_variable[area1][area2][variable])
qp_dual_variable[area1][area2][variable][idx] = -dual(constraint_ref[area1][area2][variable][idx])
qp_dual_variable[area2][area1][variable][idx] = -dual(constraint_ref[area1][area2][variable][idx])
end
end
end
end
end
end
"update the ALADIN algorithm coordinator data after each iteration"
function update_method_coordinator(data::Dict{String, <:Any})
if data["parameter"]["mu"] < data["parameter"]["mu_upper"]
data["parameter"]["mu"] = data["parameter"]["r_mu"]*data["parameter"]["mu"]
end
save_solution!(data)
update_iteration!(data)
end
push!(_pmada_global_keys, "local_solution", "shared_variable", "received_variable", "shared_delta", "received_delta", "dual_variable", "shared_dual_variable", "received_dual_variable", "shared_sensitivities", "received_sensitivities")
end
"""
solve_dopf_aladin_coordinated(data::Dict{String, <:Any}, model_type::DataType, optimizer; tol::Float64=1e-4,
max_iteration::Int64=1000, print_level = true, p::Real=1000, mu::Real=1000, p_upper::Real=1e6, mu_upper::Real=2e6, r_p::Real=1.5, mu_p::Real=2, a1::Real=1, a2::Real=1, # a3::Real=1, q_gamma::Real=0, sigma::Dict{String,Real}=Dict())
Solve the distributed OPF problem using ALADIN algorithm with central coordinator.
# Arguments:
- data::Dict{String, <:Any} : dictionary contains case in PowerModel format
- model_type::DataType : power flow formulation (PowerModel type)
- optimizer : optimizer JuMP initiation object
- mismatch_method::String="norm" : mismatch calculation method (norm, max)
- tol::Float64=1e-4 : mismatch tolerance
- max_iteration::Int64=1000 : maximum number of iteration
- print_level::Int64=1 : print mismatch after each iteration and result summary
- p::Real=1000 : parameter
- mu::Real=1000 : parameter
- p_upper::Real=1e6 : parameter
- mu_upper::Real=2e6 : parameter
- r_p::Real=1.5 : parameter
- r_mu::Real=2 : parameter
- a1::Real=1 : parameter
- a2::Real=1 : parameter
- a3::Real=1 : parameter
- q_gamma::Real=0 : parameter
- sigma::Dict{String, <:Any}=Dict() : dictionary with variable name as key and parameter value as values
"""
function solve_dopf_aladin_coordinated(data::Union{Dict{String, <:Any}, String}, model_type::DataType, optimizer; mismatch_method::String="norm", tol::Float64=1e-4, max_iteration::Int64=1000, print_level::Int64=1, p::Real=1000, mu::Real=1000, p_upper::Real=1e6, mu_upper::Real=2e6, r_p::Real=1.5, r_mu::Real=2, a1::Real=1, a2::Real=1, a3::Real=1, q_gamma::Real=0, sigma::Dict{String, <:Any}=Dict{String,Any}("w"=> 20, "wr"=>5, "wi"=>5 ,"vi"=> 10, "vr"=> 10, "va" => 10, "vm" => 5, "pf" => 1, "pt" => 1, "qf" => 1, "qt" => 1, "pg" => 1, "qg" => 1))
# obtain and standardize case data
if isa(data, String)
data = parse_file(data)
end
PowerModels.standardize_cost_terms!(data, order=2)
# obtain and arrange areas id
arrange_areas_id!(data)
areas_id = get_areas_id(data)
# decompose the system into subsystems
data_area = Dict{Int64, Any}()
for i in areas_id
data_area[i] = decompose_system(data, i)
end
    # initialize distributed power model parameters
data_coordinator = aladin_coordinated_methods.initialize_method_coordinator(data, model_type; mismatch_method=mismatch_method, max_iteration=max_iteration, tol=tol, a1=a1, a2=a2, a3=a3, mu=mu, r_mu=r_mu, mu_upper=mu_upper, q_gamma=q_gamma, sigma=sigma)
for i in areas_id
aladin_coordinated_methods.initialize_method_local(data_area[i], model_type; mismatch_method=mismatch_method, max_iteration=max_iteration, tol=tol, p=p, a1=a1, a2=a2, a3=a3, r_p=r_p, p_upper=p_upper, q_gamma=q_gamma, sigma=sigma)
end
    ## initialize the algorithm's global counters
iteration = 0
flag_convergence = false
## start iteration
while iteration < max_iteration && !flag_convergence
# solve local area problems in parallel
info1 = @capture_out begin
Threads.@threads for i in areas_id
result = solve_pmada_model(data_area[i], model_type, optimizer, aladin_coordinated_methods.build_method_local, solution_processors=aladin_coordinated_methods.post_processors_local)
update_data!(data_area[i], result["solution"])
end
end
# share solution of local areas with the coordinator
for i in areas_id # sender subsystem
shared_data = prepare_shared_data(data_area[i], 0, serialize = false)
receive_shared_data!(data_coordinator, shared_data, i)
end
# solve coordinator problem
info2 = @capture_out begin
aladin_coordinated_methods.solve_coordinator!(data_coordinator, optimizer)
end
# share coordinator solution with local areas
for i in areas_id # sender subsystem
shared_data = prepare_shared_data(data_coordinator, i, serialize = false)
receive_shared_data!(data_area[i], shared_data, 0)
end
        # update local area and coordinator data after each iteration
aladin_coordinated_methods.update_method_coordinator(data_coordinator)
for i in areas_id
aladin_coordinated_methods.update_method_local(data_area[i])
end
# check global convergence and update iteration counters
flag_convergence = update_global_flag_convergence(data_area)
iteration += 1
# print solution
print_iteration(data_area, print_level, [info1; info2])
end
data_area[0] = data_coordinator
print_convergence(data_area, print_level)
return data_area
end
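# Hypothetical usage sketch (the case file, optimizer choice, and option values
# are assumptions; ACPPowerModel comes from PowerModels):
#
#   using PowerModelsADA, Ipopt
#   data = parse_file("case14.m")   # case with an area assigned to each bus
#   result = solve_dopf_aladin_coordinated(data, ACPPowerModel, Ipopt.Optimizer;
#       tol=1e-4, max_iteration=100, print_level=1)
#   # result[0] holds the coordinator data; result[i] the data of area i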
# export the algorithm methods module and solve method
export aladin_coordinated_methods, solve_dopf_aladin_coordinated
###############################################################################
# Build methods for APP algorithm #
###############################################################################
"""
APP algorithm module contains build and update methods
"""
module app_methods
using ..PowerModelsADA
"solve distributed OPF using APP algorithm"
function solve_method(data, model_type::DataType, optimizer; kwargs...)
solve_dopf(data, model_type, optimizer, app_methods; kwargs...)
end
"initialize the APP algorithm"
function initialize_method(data::Dict{String, <:Any}, model_type::Type; kwargs...)
area_id = get_area_id(data)
areas_id = get_areas_id(data)
deleteat!(areas_id, areas_id .== area_id) # remove the same area from the list of areas_id
initialization_method = get(kwargs, :initialization_method, "flat")
# primal and dual shared variables
data["shared_variable"] = initialize_shared_variable(data, model_type, area_id, areas_id, "shared_variable", initialization_method)
data["received_variable"] = initialize_shared_variable(data, model_type, area_id, areas_id, "received_variable", initialization_method)
data["dual_variable"] = initialize_shared_variable(data, model_type, area_id, areas_id, "dual_variable", initialization_method)
# distributed algorithm settings
initialize_dopf!(data, model_type; kwargs...)
# initialize APP parameters
data["parameter"] = Dict(
"alpha" => Float64(get(kwargs, :alpha, 1000)),
"beta" => Float64(get(kwargs, :beta, 2*Float64(get(kwargs, :alpha, 1000)))),
"gamma" => Float64(get(kwargs, :gamma, Float64(get(kwargs, :alpha, 1000)))))
end
"build PowerModel object for the APP algorithm"
function build_method(pm::AbstractPowerModel)
# define variables
variable_opf(pm)
# define constraints
constraint_opf(pm)
# define objective function
objective_min_fuel_and_consensus!(pm, objective_app)
end
"APP algorithm objective function"
function objective_app(pm::AbstractPowerModel)
## APP parameters
alpha = pm.data["parameter"]["alpha"]
beta = pm.data["parameter"]["beta"]
gamma = pm.data["parameter"]["gamma"]
## data
shared_variable_local = pm.data["shared_variable"]
shared_variable_received = pm.data["received_variable"]
dual_variable = pm.data["dual_variable"]
## objective function
objective = 0
for area in keys(dual_variable)
for variable in keys(dual_variable[area])
for idx in keys(dual_variable[area][variable])
v = PowerModelsADA._var(pm, variable, idx)
v_neighbor = shared_variable_received[area][variable][idx]
v_local = shared_variable_local[area][variable][idx]
v_dual = dual_variable[area][variable][idx]
objective += beta/2 * (v - v_local)^2 + gamma * v * (v_local - v_neighbor) + v * v_dual
end
end
end
return objective
end
"update the APP algorithm data after each iteration"
function update_method(data::Dict{String, <:Any})
## APP parameters
alpha = data["parameter"]["alpha"]
## data
shared_variable_local = data["shared_variable"]
shared_variable_received = data["received_variable"]
dual_variable = data["dual_variable"]
## update dual variable
for area in keys(dual_variable)
for variable in keys(dual_variable[area])
for idx in keys(dual_variable[area][variable])
v_neighbor = shared_variable_received[area][variable][idx]
v_local = shared_variable_local[area][variable][idx]
v_dual = dual_variable[area][variable][idx]
data["dual_variable"][area][variable][idx] = v_dual + alpha * (v_local - v_neighbor)
end
end
end
calc_mismatch!(data)
update_flag_convergence!(data)
save_solution!(data)
update_iteration!(data)
end
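# Toy illustration (assumed numbers, not package data) of the dual update in
# update_method, λ ← λ + α (v_local − v_neighbor):
#
#   alpha = 1000.0
#   v_local, v_neighbor, v_dual = 1.02, 1.00, 0.0
#   v_dual += alpha * (v_local - v_neighbor)    # ≈ 20.0, penalizing the mismatch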
post_processors = [update_solution!, update_shared_variable!]
push!(_pmada_global_keys, "shared_variable", "received_variable", "dual_variable")
end
"""
solve_dopf_app(data::Dict{String, <:Any}, model_type::DataType, optimizer;
mismatch_method::String="norm",tol::Float64=1e-4, max_iteration::Int64=1000,
print_level::Int64=1, alpha::Real=1000, beta::Real, gamma::Real)
Solve the distributed OPF problem using APP algorithm.
# Arguments:
- data::Dict{String, <:Any} : dictionary contains case in PowerModel format
- model_type::DataType : power flow formulation (PowerModel type)
- optimizer : optimizer JuMP initiation object
- mismatch_method::String="norm" : mismatch calculation method (norm, max)
- tol::Float64=1e-4 : mismatch tolerance
- max_iteration::Int64=1000 : maximum number of iteration
- print_level::Int64=1 : print mismatch after each iteration and result summary
- alpha::Real=1000 : algorithm parameter
- beta::Real=2alpha : algorithm parameter
- gamma::Real=alpha : algorithm parameter
"""
solve_dopf_app = app_methods.solve_method
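# Hypothetical call (the case path is an assumption); beta and gamma default to
# 2*alpha and alpha when not supplied:
#
#   using PowerModelsADA, Ipopt
#   data = parse_file("case14.m")
#   result = solve_dopf_app(data, DCPPowerModel, Ipopt.Optimizer; alpha=500)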
# export the algorithm methods module and solve method
export app_methods, solve_dopf_app
###############################################################################
# Build methods for ATC algorithm with coordinator #
###############################################################################
"""
ATC algorithm module contains build and update methods
"""
module atc_coordinated_methods
using ..PowerModelsADA
"solve distributed OPF using ATC algorithm with central coordinator"
function solve_method(data, model_type::DataType, optimizer; kwargs...)
solve_dopf_coordinated(data, model_type, optimizer, atc_coordinated_methods; kwargs...)
end
"initialize the ATC algorithm local area"
function initialize_method_local(data::Dict{String, <:Any}, model_type::DataType; kwargs...)
area_id = get_area_id(data)
initialization_method = get(kwargs, :initialization_method, "flat")
# primal and dual shared variables
data["shared_variable"] = initialize_shared_variable(data, model_type, area_id, [0], "shared_variable", initialization_method)
data["received_variable"] = initialize_shared_variable(data, model_type, area_id, [0], "received_variable", initialization_method)
data["dual_variable"] = initialize_shared_variable(data, model_type, area_id ,[0], "dual_variable", initialization_method)
# distributed algorithm settings
initialize_dopf!(data, model_type; kwargs...)
# initialize ATC parameters
data["parameter"] = Dict(
"alpha" => Float64(get(kwargs, :alpha, 1.05)),
"beta" => Float64(get(kwargs, :beta, 1)),
"beta_max" => Float64(get(kwargs, :beta_max, 1e6)))
end
"initialize the ATC algorithm coordinator"
function initialize_method_coordinator(data::Dict{String, <:Any}, model_type::DataType; kwargs...)
area_id = get_area_id(data)
areas_id = get_areas_id(data)
initialization_method = get(kwargs, :initialization_method, "flat")
# initialize primal and dual shared variables
data["shared_variable"] = initialize_shared_variable(data, model_type, area_id, areas_id, "shared_variable", initialization_method)
data["received_variable"] = initialize_shared_variable(data, model_type, area_id, areas_id, "received_variable", initialization_method)
data["dual_variable"] = initialize_shared_variable(data, model_type, area_id, areas_id, "dual_variable", initialization_method)
# distributed algorithm settings
initialize_dopf!(data, model_type; kwargs...)
# initialize ATC parameters
data["parameter"] = Dict(
"alpha" => Float64(get(kwargs, :alpha, 1.05)),
"beta" => Float64(get(kwargs, :beta, 1)),
"beta_max" => Float64(get(kwargs, :beta_max, 1e6)))
end
"build PowerModel object for ATC algorithm local area"
function build_method_local(pm::AbstractPowerModel)
# define variables
variable_opf(pm)
# define constraints
constraint_opf(pm)
# define objective function
objective_min_fuel_and_consensus!(pm, objective_atc_local)
end
"build PowerModel object for the ATC algorithm coordinator"
function build_method_coordinator(pm::AbstractPowerModel)
# define variables
variable_opf(pm)
# define objective function
objective_min_fuel_and_consensus!(pm, objective_atc_coordinator)
end
"ATC algorithm objective function of the coordinator"
function objective_atc_local(pm::AbstractPowerModel)
## atc parameters
beta = pm.data["parameter"]["beta"]
## data
shared_variable_received = pm.data["received_variable"]
dual_variable = pm.data["dual_variable"]
## objective function
objective = 0
for area in keys(dual_variable)
for variable in keys(dual_variable[area])
for idx in keys(dual_variable[area][variable])
v = PowerModelsADA._var(pm, variable, idx)
v_central = shared_variable_received[area][variable][idx]
v_dual = dual_variable[area][variable][idx]
objective += (beta * (v - v_central))^2 + v_dual * (v - v_central)
end
end
end
return objective
end
"ATC algorithm objective function of the local area"
objective_atc_coordinator(pm::AbstractPowerModel) = objective_atc_local(pm)
"update the ATC algorithm coordinator data after each iteration"
function update_method_local(data::Dict{String, <:Any})
## ATC parameters
alpha = data["parameter"]["alpha"]
beta = data["parameter"]["beta"]
beta_max = data["parameter"]["beta_max"]
## data
shared_variable_local = data["shared_variable"]
shared_variable_received = data["received_variable"]
dual_variable = data["dual_variable"]
## update dual variable
for area in keys(dual_variable)
for variable in keys(dual_variable[area])
for idx in keys(dual_variable[area][variable])
v_local = shared_variable_local[area][variable][idx]
v_central = shared_variable_received[area][variable][idx]
v_dual = dual_variable[area][variable][idx]
data["dual_variable"][area][variable][idx]= v_dual + 2 * beta^2 * (v_local - v_central)
end
end
end
## update ATC parameter
if beta < beta_max
data["parameter"]["beta"] *= alpha
end
calc_mismatch!(data)
update_flag_convergence!(data)
save_solution!(data)
update_iteration!(data)
end
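# Toy illustration (assumed numbers) of one step of update_method_local: the
# dual follows λ ← λ + 2β² (v_local − v_central) and the penalty grows
# geometrically, β ← αβ, until it reaches beta_max:
#
#   alpha, beta = 1.05, 1.0
#   v_local, v_central, v_dual = 0.98, 1.00, 0.0
#   v_dual += 2 * beta^2 * (v_local - v_central)   # ≈ -0.04
#   beta = beta < 1e6 ? beta * alpha : beta        # 1.05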
"update the ATC algorithm coordinator data after each iteration"
update_method_coordinator(data::Dict{String, <:Any}) = update_method_local(data)
post_processors_local = [update_solution!, update_shared_variable!]
post_processors_coordinator = [update_solution!, update_shared_variable!]
push!(_pmada_global_keys, "shared_variable", "received_variable", "dual_variable")
end
"""
solve_dopf_atc_coordinated(data::Dict{String, <:Any}, model_type::DataType, optimizer; tol::Float64=1e-4,
max_iteration::Int64=1000, print_level = true, alpha::Real=1000)
Solve the distributed OPF problem using ATC algorithm with central coordinator.
# Arguments:
- data::Dict{String, <:Any} : dictionary contains case in PowerModel format
- model_type::DataType : power flow formulation (PowerModel type)
- optimizer : optimizer JuMP initiation object
- mismatch_method::String="norm" : mismatch calculation method (norm, max)
- tol::Float64=1e-4 : mismatch tolerance
- max_iteration::Int64=1000 : maximum number of iteration
- print_level::Int64=1 : print mismatch after each iteration and result summary
- alpha::Real=1.05 : algorithm parameters
- beta::Real=1.0 : algorithm parameters
"""
solve_dopf_atc_coordinated = atc_coordinated_methods.solve_method
# export the algorithm methods module and solve method
export atc_coordinated_methods, solve_dopf_atc_coordinated