licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 3.1.6 | 6fdba8b4279460fef5674e9aa2dac7ef5be361d5 | code | 3369 | @testset "LongSubSeq" begin
@testset "Construction" begin
seq = LongSequence{AminoAcidAlphabet}([AA_A, AA_V, AA_W, AA_Y, AA_H])
v1 = LongSubSeq{AminoAcidAlphabet}(seq.data, 2:4)
v2 = LongSubSeq(seq, 2:4)
v3 = view(seq, 2:4)
v4 = @view seq[2:4]
v5 = LongSubSeq(seq, :)
vv = LongSubSeq(v1, 2:3)
vv2 = v1[2:3]
vv3 = LongSubSeq(vv)
@test_throws BoundsError LongSubSeq(seq, 0:4)
@test_throws BoundsError LongSubSeq(seq, 1:6)
@test_throws BoundsError LongSubSeq(v1, 1:4)
@test collect(v5) == collect(seq)
@test typeof(v1) == typeof(v2) == typeof(v3) == typeof(v4) == typeof(vv) == LongSubSeq{AminoAcidAlphabet}
@test v1 == v2 == v3 == v4
@test vv == vv2 == vv3
vv[1] = AA_V
@test vv == vv2 == vv3
@test LongSubSeq{AminoAcidAlphabet}(seq) == view(seq, eachindex(seq))
s = dna"TASCTAWTA"
@test collect(LongSubSeq{RNAAlphabet{4}}(s, 3:7)) == collect(LongRNA{4}(s)[3:7])
end
@testset "Basics" begin
seq = LongSequence{AminoAcidAlphabet}([AA_A, AA_V, AA_W, AA_Y, AA_H])
v1 = LongSubSeq{AminoAcidAlphabet}(seq, 2:4)
v2 = LongSubSeq(seq, 1:0)
@test length(v1) == 3
@test !isempty(v1)
@test isempty(v2)
@test length(v2) == 0
v1[1] = 'N'
v1[2] = 'K'
@test String(seq) == "ANKYH"
end
# Added after issue 260
@testset "Random construction" begin
for i in 1:100
seq = randdnaseq(rand(15:65))
begin_ = min(lastindex(seq), rand(10:30))
range = begin_:min(lastindex(seq), begin_ + rand(0:40))
seq2 = view(seq, range)
@test seq2 isa LongSubSeq{typeof(Alphabet(seq))}
seq3 = LongSequence(seq2)
@test typeof(seq) == typeof(seq3)
@test seq[range] == seq2 == seq3
end
# See issue 260
seq = dna"CATTTTTTTTTTTTTTT"
seq2 = LongSequence(LongSubSeq(seq, 1:17))
@test seq == seq2
end
@testset "Conversion" begin
seq = LongDNA{4}("TAGTATCGAAMYCGNA")
v = LongSubSeq(seq, 3:14)
@test LongSequence(v) == seq[3:14]
s2 = LongSequence{RNAAlphabet{4}}(seq[3:14])
@test LongSequence{RNAAlphabet{4}}(v) == s2
@test LongSequence(LongSubSeq{RNAAlphabet{4}}(seq)) == LongSequence{RNAAlphabet{4}}(seq)
@test LongSubSeq{RNAAlphabet{4}}(seq) == LongSequence{RNAAlphabet{4}}(seq)
end
@testset "Transformations" begin
# Reverse!
str = "SKVANNSFDGRKIQAWPSRQ"
seq = LongAA(str)
seq2 = copy(seq)
v = view(seq, 1:lastindex(seq))
reverse!(v)
@test seq == LongAA(reverse(str))
@test seq == v
@test seq != seq2
reverse!(v)
@test seq == LongAA(str)
@test seq == v
@test seq == seq2
seq = LongDNA{4}("TGAGTCGTAGGAAGGACCTAAA")
seq2 = copy(seq)
v = LongSubSeq(seq2, 3:15)
complement!(v)
@test seq2[3:15] == complement(seq[3:15])
@test seq2[1:2] == dna"TG"
@test seq2[16:end] == seq[16:end]
# A longer example to engage some inner loops
seq = randdnaseq(38)
seq2 = copy(seq)
v = LongSubSeq(seq, 3:36)
complement!(v)
@test v == complement(seq2[3:36])
end
@testset "Copying" begin
seq = LongRNA{4}("UAUUAACCGGAGAUCAUUCAGGUAA")
v1 = view(seq, 1:3)
v2 = view(seq, 4:11)
@test copy(v1) == seq[1:3]
@test copy(v2) == seq[4:11]
# Can't resize views
@test_throws Exception copy!(v1, v2)
# Works even when sharing underlying data
# Note it is not possible to have v2 == v3 after copying
v3 = view(seq, 1:8)
before = LongSequence(v3)
copy!(v2, v3)
@test v2 == before
# Also works with nonshared data
v1 = view(randaaseq(20), 3:17)
v2 = view(randaaseq(20), 3:17)
copy!(v1, v2)
@test v1 == v2
end
end # seqview
| BioSequences | https://github.com/BioJulia/BioSequences.jl.git |
|
[
"MIT"
] | 3.1.6 | 6fdba8b4279460fef5674e9aa2dac7ef5be361d5 | code | 561 | @testset "Shuffle" begin
@testset for i in 1:10
@test shuffle(dna"") == dna""
@test shuffle(dna"A") == dna"A"
@test shuffle(dna"C") == dna"C"
end
seq = dna"ACGTN"^10
@test shuffle(seq) != dna"ACGTN"^10
@test seq == dna"ACGTN"^10
@test shuffle!(seq) === seq
@test seq != dna"ACGTN"^10
@test count(x -> x == DNA_A, seq) == 10
@test count(x -> x == DNA_C, seq) == 10
@test count(x -> x == DNA_G, seq) == 10
@test count(x -> x == DNA_T, seq) == 10
@test count(x -> x == DNA_N, seq) == 10
end
| BioSequences | https://github.com/BioJulia/BioSequences.jl.git |
|
[
"MIT"
] | 3.1.6 | 6fdba8b4279460fef5674e9aa2dac7ef5be361d5 | code | 6831 | @testset "Transformations" begin
function test_reverse(A, seq)
revseq = reverse(LongSequence{A}(seq))
@test String(revseq) == reverse(seq)
revseq = reverse!(LongSequence{A}(seq))
@test String(revseq) == reverse(seq)
end
function test_dna_complement(A, seq)
comp = complement(LongSequence{A}(seq))
@test String(comp) == dna_complement(seq)
end
function test_rna_complement(A, seq)
comp = complement(LongSequence{A}(seq))
@test String(comp) == rna_complement(seq)
end
function test_dna_revcomp(A, seq)
revcomp = reverse_complement(LongSequence{A}(seq))
@test String(revcomp) == reverse(dna_complement(seq))
end
function test_rna_revcomp(A, seq)
revcomp = reverse_complement(LongSequence{A}(seq))
@test String(revcomp) == reverse(rna_complement(seq))
end
@testset "Reverse" begin
for len in [0, 1, 32, 1000], _ in 1:5
test_reverse(DNAAlphabet{4}, random_dna(len))
test_reverse(RNAAlphabet{4}, random_rna(len))
test_reverse(AminoAcidAlphabet, random_aa(len))
probs = [0.25, 0.25, 0.25, 0.25, 0.00]
test_reverse(DNAAlphabet{2}, random_dna(len, probs))
test_reverse(RNAAlphabet{2}, random_rna(len, probs))
end
end
@testset "Complement" begin
for len in [0, 1, 10, 32, 1000], _ in 1:5
test_dna_complement(DNAAlphabet{4}, random_dna(len))
test_rna_complement(RNAAlphabet{4}, random_rna(len))
probs = [0.25, 0.25, 0.25, 0.25, 0.00]
test_dna_complement(DNAAlphabet{2}, random_dna(len, probs))
test_rna_complement(RNAAlphabet{2}, random_rna(len, probs))
end
seq_string = join(rand("-ACGTSWKMYRBDHVN", 1000))
seq = complement(LongSequence{DNAAlphabet{4}}(seq_string))
@test String(seq) == dna_complement(seq_string)
seq_string = join(rand("-ACGUSWKMYRBDHVN", 1000))
seq = complement(LongSequence{RNAAlphabet{4}}(seq_string))
@test String(seq) == rna_complement(seq_string)
end
@testset "Reverse complement" begin
for len in [0, 1, 10, 32, 1000], _ in 1:5
test_dna_revcomp(DNAAlphabet{4}, random_dna(len))
test_rna_revcomp(RNAAlphabet{4}, random_rna(len))
probs = [0.25, 0.25, 0.25, 0.25, 0.00]
test_dna_revcomp(DNAAlphabet{2}, random_dna(len, probs))
test_rna_revcomp(RNAAlphabet{2}, random_rna(len, probs))
end
seq_string = join(rand("-ACGTSWKMYRBDHVN", 1000))
seq = reverse_complement(LongSequence{DNAAlphabet{4}}(seq_string))
@test String(seq) == reverse(dna_complement(seq_string))
seq_string = join(rand("-ACGUSWKMYRBDHVN", 1000))
seq = reverse_complement(LongSequence{RNAAlphabet{4}}(seq_string))
@test String(seq) == reverse(rna_complement(seq_string))
seq_string = join(rand("-ACGTSWKMYRBDHVN", 1000))
seq = reverse_complement!(LongSequence{DNAAlphabet{4}}(seq_string))
@test String(seq) == reverse(dna_complement(seq_string))
seq_string = join(rand("-ACGTSWKMYRBDHVN", 1000))
seq = LongSequence{DNAAlphabet{4}}(seq_string)
seq2 = seq[100:200]
reverse_complement!(seq2)
@test String(seq) == seq_string
@test String(seq2) == reverse(dna_complement(seq_string[100:200]))
# Test RC'ing works even with extra data in data buffer
seq = ungap!(dna"ACTG-----------CCAG")
@test String(reverse_complement(seq)) == reverse(dna_complement(String(seq)))
slice = randdnaseq(10)[2:9]
@test reverse_complement(reverse_complement(slice)) == slice
end
@testset "Map" begin
seq = dna""
@test map(identity, seq) == dna""
seq = dna"AAA"
@test map(x -> DNA_C, seq) == dna"CCC"
seq = dna"ACGTNACGTN"
@test map(x -> x == DNA_N ? DNA_A : x, seq) == dna"ACGTAACGTA"
@test seq == dna"ACGTNACGTN"
@test map!(x -> x == DNA_N ? DNA_A : x, seq) === seq
@test seq == dna"ACGTAACGTA"
seq = rna""
@test map(identity, seq) == rna""
seq = rna"AAA"
@test map(x -> RNA_C, seq) == rna"CCC"
seq = rna"ACGUNACGUN"
@test map(x -> x == RNA_N ? RNA_A : x, seq) == rna"ACGUAACGUA"
@test seq == rna"ACGUNACGUN"
@test map!(x -> x == RNA_N ? RNA_A : x, seq) === seq
@test seq == rna"ACGUAACGUA"
seq = aa""
@test map(identity, seq) == aa""
seq = aa"MMM"
@test map(x -> AA_P, seq) == aa"PPP"
seq = aa"XRNDCQXE"
@test map(x -> x == AA_X ? AA_A : x, seq) == aa"ARNDCQAE"
@test seq == aa"XRNDCQXE"
@test map!(x -> x == AA_X ? AA_A : x, seq) === seq
@test seq == aa"ARNDCQAE"
end
@testset "Filter" begin
seq = dna""
@test filter(x -> true, seq) == dna""
@test filter(x -> false, seq) == dna""
seq = dna"AAA"
@test filter(x -> x == DNA_A, seq) == dna"AAA"
@test filter(x -> x == DNA_C, seq) == dna""
seq = dna"ACGTNACGTN"
@test filter(x -> x == DNA_N, seq) == dna"NN"
@test filter(x -> x != DNA_N, seq) == dna"ACGTACGT"
@test seq == dna"ACGTNACGTN"
@test filter!(x -> x != DNA_N, seq) == seq
@test seq == dna"ACGTACGT"
for len in [1, 2, 3, 5, 8, 9, 15, 19, 31, 32, 33, 50], _ in 1:5
str = random_dna(len)
seq = LongDNA{4}(str)
@test filter(x -> x == DNA_N, seq) ==
LongDNA{4}(filter(x -> x == 'N', str))
@test filter(x -> x != DNA_N, seq) ==
LongDNA{4}(filter(x -> x != 'N', str))
end
seq = rna""
@test filter(x -> true, seq) == rna""
@test filter(x -> false, seq) == rna""
seq = rna"AAA"
@test filter(x -> x == RNA_A, seq) == rna"AAA"
@test filter(x -> x == RNA_C, seq) == rna""
seq = rna"ACGUNACGUN"
@test filter(x -> x == RNA_N, seq) == rna"NN"
@test filter(x -> x != RNA_N, seq) == rna"ACGUACGU"
@test seq == rna"ACGUNACGUN"
@test filter!(x -> x != RNA_N, seq) == seq
@test seq == rna"ACGUACGU"
seq = aa""
@test filter(x -> true, seq) == aa""
@test filter(x -> false, seq) == aa""
seq = aa"PPP"
@test filter(x -> x == AA_P, seq) == aa"PPP"
@test filter(x -> x != AA_P, seq) == aa""
seq = aa"ARNDCQXGHILKMFPXTWYVOUX"
@test filter(x -> x == AA_X, seq) == aa"XXX"
@test filter(x -> x != AA_X, seq) == aa"ARNDCQGHILKMFPTWYVOU"
@test seq == aa"ARNDCQXGHILKMFPXTWYVOUX"
@test filter!(x -> x != AA_X, seq) == seq
@test seq == aa"ARNDCQGHILKMFPTWYVOU"
end
end
| BioSequences | https://github.com/BioJulia/BioSequences.jl.git |
|
[
"MIT"
] | 3.1.6 | 6fdba8b4279460fef5674e9aa2dac7ef5be361d5 | code | 6006 | @testset "Approximate" begin
seq = dna"ACGTACG"
@testset "iscompatible" begin
@testset "forward" begin
@test findnext(ApproximateSearchQuery(dna"", iscompatible), 0, seq, 1) === 1:0
@test findnext(ApproximateSearchQuery(dna"AC", iscompatible), 0, seq, 1) === 1:2
@test findnext(ApproximateSearchQuery(dna"AC", iscompatible), 0, seq, 2) === 5:6
@test findnext(ApproximateSearchQuery(dna"AT", iscompatible), 0, seq, 1) === nothing
@test findnext(ApproximateSearchQuery(dna"AT", iscompatible), 1, seq, 1) === 1:1
@test findnext(ApproximateSearchQuery(dna"AT", iscompatible), 1, seq, 2) === 3:4
@test findnext(ApproximateSearchQuery(dna"NG", iscompatible), 0, seq, 1) === 2:3
@test findnext(ApproximateSearchQuery(dna"NG", iscompatible), 1, seq, 1) === 1:1
@test findnext(ApproximateSearchQuery(dna"GN", iscompatible), 0, seq, 1) === 3:4
@test findnext(ApproximateSearchQuery(dna"GN", iscompatible), 1, seq, 1) === 1:1
@test findnext(ApproximateSearchQuery(dna"ACG", iscompatible), 0, seq, 1) === 1:3
@test findnext(ApproximateSearchQuery(dna"ACG", iscompatible), 1, seq, 1) === 1:2
@test findnext(ApproximateSearchQuery(dna"ACG", iscompatible), 2, seq, 1) === 1:1
@test findnext(ApproximateSearchQuery(dna"ACG", iscompatible), 3, seq, 1) === 1:0
@test findnext(ApproximateSearchQuery(dna"ACG", iscompatible), 4, seq, 1) === 1:0
end
@testset "backward" begin
# TODO: maybe this should return 8:7 like rsearch
@test findprev(ApproximateSearchQuery(dna"", iscompatible), 0, seq, 7) === 7:6
@test findprev(ApproximateSearchQuery(dna"AC", iscompatible), 0, seq, 7) === 5:6
@test findprev(ApproximateSearchQuery(dna"AC", iscompatible), 0, seq, 5) === 1:2
@test findprev(ApproximateSearchQuery(dna"AT", iscompatible), 0, seq, 7) === nothing
@test findprev(ApproximateSearchQuery(dna"AT", iscompatible), 1, seq, 7) === 5:6
@test findprev(ApproximateSearchQuery(dna"AT", iscompatible), 1, seq, 6) === 5:6
@test findprev(ApproximateSearchQuery(dna"AT", iscompatible), 1, seq, 5) === 5:5
@test findprev(ApproximateSearchQuery(dna"NG", iscompatible), 0, seq, 7) === 6:7
@test findprev(ApproximateSearchQuery(dna"NG", iscompatible), 0, seq, 6) === 2:3
@test findprev(ApproximateSearchQuery(dna"GN", iscompatible), 0, seq, 7) === 3:4
@test findprev(ApproximateSearchQuery(dna"GN", iscompatible), 1, seq, 7) === 7:7
@test findprev(ApproximateSearchQuery(dna"ACG", iscompatible), 0, seq, 7) === 5:7
@test findprev(ApproximateSearchQuery(dna"ACG", iscompatible), 1, seq, 7) === 6:7
@test findprev(ApproximateSearchQuery(dna"ACG", iscompatible), 2, seq, 7) === 7:7
@test findprev(ApproximateSearchQuery(dna"ACG", iscompatible), 3, seq, 7) === 7:6
@test findprev(ApproximateSearchQuery(dna"ACG", iscompatible), 4, seq, 7) === 7:6
end
end
@testset "isequal" begin
@testset "forward" begin
@test findnext(ApproximateSearchQuery(dna""), 0, seq, 1) === 1:0
@test findnext(ApproximateSearchQuery(dna"AC"), 0, seq, 1) === 1:2
@test findnext(ApproximateSearchQuery(dna"AC"), 0, seq, 2) === 5:6
@test findnext(ApproximateSearchQuery(dna"AT"), 0, seq, 1) === nothing
@test findnext(ApproximateSearchQuery(dna"AT"), 1, seq, 1) === 1:1
@test findnext(ApproximateSearchQuery(dna"AT"), 1, seq, 2) === 3:4
@test findnext(ApproximateSearchQuery(dna"NG"), 0, seq, 1) === nothing
@test findnext(ApproximateSearchQuery(dna"NG"), 1, seq, 1) === 2:3
@test findnext(ApproximateSearchQuery(dna"GN"), 0, seq, 1) === nothing
@test findnext(ApproximateSearchQuery(dna"GN"), 1, seq, 1) === 3:3
@test findnext(ApproximateSearchQuery(dna"ACG"), 0, seq, 1) === 1:3
@test findnext(ApproximateSearchQuery(dna"ACG"), 1, seq, 1) === 1:2
@test findnext(ApproximateSearchQuery(dna"ACG"), 2, seq, 1) === 1:1
@test findnext(ApproximateSearchQuery(dna"ACG"), 3, seq, 1) === 1:0
@test findnext(ApproximateSearchQuery(dna"ACG"), 4, seq, 1) === 1:0
end
@testset "backward" begin
# TODO: maybe this should return 8:7 like rsearch
@test findprev(ApproximateSearchQuery(dna""), 0, seq, 7) === 7:6
@test findprev(ApproximateSearchQuery(dna"AC"), 0, seq, 7) === 5:6
@test findprev(ApproximateSearchQuery(dna"AC"), 0, seq, 5) === 1:2
@test findprev(ApproximateSearchQuery(dna"AT"), 0, seq, 7) === nothing
@test findprev(ApproximateSearchQuery(dna"AT"), 1, seq, 7) === 5:6
@test findprev(ApproximateSearchQuery(dna"AT"), 1, seq, 6) === 5:6
@test findprev(ApproximateSearchQuery(dna"AT"), 1, seq, 5) === 5:5
@test findprev(ApproximateSearchQuery(dna"NG"), 0, seq, 7) === nothing
@test findprev(ApproximateSearchQuery(dna"NG"), 1, seq, 6) === 3:3
@test findprev(ApproximateSearchQuery(dna"GN"), 0, seq, 7) === nothing
@test findprev(ApproximateSearchQuery(dna"GN"), 1, seq, 7) === 7:7
@test findprev(ApproximateSearchQuery(dna"ACG"), 0, seq, 7) === 5:7
@test findprev(ApproximateSearchQuery(dna"ACG"), 1, seq, 7) === 6:7
@test findprev(ApproximateSearchQuery(dna"ACG"), 2, seq, 7) === 7:7
@test findprev(ApproximateSearchQuery(dna"ACG"), 3, seq, 7) === 7:6
@test findprev(ApproximateSearchQuery(dna"ACG"), 4, seq, 7) === 7:6
end
end
end
| BioSequences | https://github.com/BioJulia/BioSequences.jl.git |
|
[
"MIT"
] | 3.1.6 | 6fdba8b4279460fef5674e9aa2dac7ef5be361d5 | code | 7644 | @testset "ExactSearchQuery" begin
seq = dna"ACGTACG"
@testset "isequal" begin
@testset "forward" begin
@test findfirst(ExactSearchQuery(dna""), seq) === 1:0
@test findfirst(ExactSearchQuery(dna"AC"), seq) === 1:2
@test findnext(ExactSearchQuery(dna"AC"), seq, 2) === 5:6
#@test findfirst(ExactSearchQuery(dna"AC"), seq, 2, 5) === nothing
@test findfirst(ExactSearchQuery(dna"TG"), seq) === nothing
@test findfirst(ExactSearchQuery(dna"TN"), seq) === nothing
@test findfirst(ExactSearchQuery(dna"ACG"), seq) === 1:3
@test findnext(ExactSearchQuery(dna"ACG"), seq, 2) === 5:7
@test findfirst(ExactSearchQuery(seq), seq) === 1:lastindex(seq)
@test findfirst(ExactSearchQuery(dna""), dna"") === 1:0
@test findnext(ExactSearchQuery(dna""), dna"", -1) === 1:0
@test findnext(ExactSearchQuery(dna""), dna"", 2) === nothing
@test first(findfirst(ExactSearchQuery(dna""), seq)) === 1
@test first(findfirst(ExactSearchQuery(dna"AC"), seq)) === 1
@test first(findnext(ExactSearchQuery(dna"AC"), seq, 2)) === 5
#@test findnext(ExactSearchQuery(dna"AC"), seq, 2) === nothing
query = ExactSearchQuery(dna"ACG")
@test findfirst(query, seq) === 1:3
@test findnext(query, seq, 2) === 5:7
#@test findfirst(query, seq, 2, 6) === nothing
@test first(findfirst(query, seq)) === 1
@test first(findnext(query, seq, 2)) === 5
#@test findfirst(query, seq, 2, 6) === nothing
end
@testset "backward" begin
@test findlast(ExactSearchQuery(dna""), seq) === 8:7
@test findlast(ExactSearchQuery(dna"AC"), seq) === 5:6
@test findprev(ExactSearchQuery(dna"AC"), seq, 5) === 1:2
#@test findlast(ExactSearchQuery(dna"AC"), seq, 5, 2) === nothing
@test findlast(ExactSearchQuery(dna"TG"), seq) === nothing
@test findlast(ExactSearchQuery(dna"TN"), seq) === nothing
@test findlast(ExactSearchQuery(dna"ACG"), seq) === 5:7
@test findprev(ExactSearchQuery(dna"ACG"), seq, 6) === 1:3
@test findlast(ExactSearchQuery(seq), seq) === 1:lastindex(seq)
@test findlast(ExactSearchQuery(dna""), dna"") === 1:0
@test findprev(ExactSearchQuery(dna""), dna"", 2) === 1:0
@test findprev(ExactSearchQuery(dna""), dna"", -1) === nothing
@test first(findlast(ExactSearchQuery(dna""), seq)) === 8
@test first(findlast(ExactSearchQuery(dna"AC"), seq)) === 5
@test first(findprev(ExactSearchQuery(dna"AC"), seq, 5)) === 1
#@test findlast(ExactSearchQuery(dna"AC"), seq, 5, 2) === nothing
query = ExactSearchQuery(dna"ACG")
@test findlast(query, seq) === 5:7
@test findprev(query, seq, 6) === 1:3
#@test findlast(query, seq, 2, 6) === nothing
@test first(findlast(query, seq)) === 5
@test first(findprev(query, seq, 6)) === 1
#@test findlast(query, seq, 6, 2) === nothing
end
@testset "occursin" begin
@test occursin(ExactSearchQuery(dna"ACG"), dna"GGGTACACGTTT") == true
@test occursin(ExactSearchQuery(dna"TGT"), dna"GGGTACACGTGT") == true
@test occursin(ExactSearchQuery(dna"GGG"), dna"GGGTACACGTGT") == true
@test occursin(ExactSearchQuery(dna"AAA"), dna"GGGTACACGTGT") == false
end
end
@testset "iscompatible" begin
@testset "forward" begin
@test findfirst(ExactSearchQuery(dna"", iscompatible), seq) === 1:0
@test findfirst(ExactSearchQuery(dna"AC", iscompatible), seq) === 1:2
@test findnext(ExactSearchQuery(dna"AC", iscompatible), seq, 2) === 5:6
#@test findfirst(ExactSearchQuery(dna"AC"), seq, 2, 5) === nothing
@test findfirst(ExactSearchQuery(dna"TG", iscompatible), seq) === nothing
@test findfirst(ExactSearchQuery(dna"TN", iscompatible), seq) === 4:5
@test findfirst(ExactSearchQuery(dna"ACG", iscompatible), seq) === 1:3
@test findnext(ExactSearchQuery(dna"ACG", iscompatible), seq, 2) === 5:7
@test findfirst(ExactSearchQuery(seq, iscompatible), seq) === 1:lastindex(seq)
@test findfirst(ExactSearchQuery(dna"", iscompatible), dna"") === 1:0
@test findnext(ExactSearchQuery(dna"", iscompatible), dna"", -1) === 1:0
@test findnext(ExactSearchQuery(dna"", iscompatible), dna"", 2) === nothing
@test first(findfirst(ExactSearchQuery(dna"", iscompatible), seq)) === 1
@test first(findfirst(ExactSearchQuery(dna"AC", iscompatible), seq)) === 1
@test first(findnext(ExactSearchQuery(dna"AC", iscompatible), seq, 2)) === 5
#@test findnext(ExactSearchQuery(dna"AC"), seq, 2) === nothing
query = ExactSearchQuery(dna"ACG", iscompatible)
@test findfirst(query, seq) === 1:3
@test findnext(query, seq, 2) === 5:7
#@test findfirst(query, seq, 2, 6) === nothing
@test first(findfirst(query, seq)) === 1
@test first(findnext(query, seq, 2)) === 5
#@test findfirst(query, seq, 2, 6) === nothing
end
@testset "backward" begin
@test findlast(ExactSearchQuery(dna"", iscompatible), seq) === 8:7
@test findlast(ExactSearchQuery(dna"AC", iscompatible), seq) === 5:6
@test findprev(ExactSearchQuery(dna"AC", iscompatible), seq, 5) === 1:2
#@test findlast(ExactSearchQuery(dna"AC"), seq, 5, 2) === nothing
@test findlast(ExactSearchQuery(dna"TG", iscompatible), seq) === nothing
@test findlast(ExactSearchQuery(dna"TN", iscompatible), seq) === 4:5
@test findlast(ExactSearchQuery(dna"ACG", iscompatible), seq) === 5:7
@test findprev(ExactSearchQuery(dna"ACG", iscompatible), seq, 6) === 1:3
@test findlast(ExactSearchQuery(seq, iscompatible), seq) === 1:lastindex(seq)
@test findlast(ExactSearchQuery(dna"", iscompatible), dna"") === 1:0
@test findprev(ExactSearchQuery(dna"", iscompatible), dna"", 2) === 1:0
@test findprev(ExactSearchQuery(dna"", iscompatible), dna"", -1) === nothing
@test first(findlast(ExactSearchQuery(dna"", iscompatible), seq)) === 8
@test first(findlast(ExactSearchQuery(dna"AC", iscompatible), seq)) === 5
@test first(findprev(ExactSearchQuery(dna"AC", iscompatible), seq, 5)) === 1
#@test findlast(ExactSearchQuery(dna"AC"), seq, 5, 2) === nothing
query = ExactSearchQuery(dna"ACG", iscompatible)
@test findlast(query, seq) === 5:7
@test findprev(query, seq, 6) === 1:3
#@test findlast(query, seq, 2, 6) === nothing
@test first(findlast(query, seq)) === 5
@test first(findprev(query, seq, 6)) === 1
#@test findlast(query, seq, 6, 2) === nothing
end
@testset "occursin" begin
@test occursin(ExactSearchQuery(dna"ACG", iscompatible), dna"GGGTACACGTTT") == true
@test occursin(ExactSearchQuery(dna"TGT", iscompatible), dna"GGGTACACGTGT") == true
@test occursin(ExactSearchQuery(dna"GGG", iscompatible), dna"GGGTACACGTGT") == true
@test occursin(ExactSearchQuery(dna"AAA", iscompatible), dna"GGGTACACGTGT") == false
end
end
end
| BioSequences | https://github.com/BioJulia/BioSequences.jl.git |
|
[
"MIT"
] | 3.1.6 | 6fdba8b4279460fef5674e9aa2dac7ef5be361d5 | code | 3943 | @testset "Position weight matrix" begin
@testset "PFM" begin
m = [
1 2 3
4 5 6
7 8 9
10 11 12
]
pfm = PFM{DNA}(m)
@test pfm isa PFM{DNA,Int}
@test convert(Matrix, pfm) == m
@test all(pfm[i] == m[i] for i in eachindex(pfm))
@test all(pfm[i,j] == m[i,j] for i in 1:4, j in 1:3)
@test all(pfm[ACGT[i],j] == m[i,j] for i in 1:4, j in 1:3)
@test startswith(sprint(show, pfm), string(summary(pfm), ":\n"))
@test_throws ArgumentError PFM{DNA}(vcat(m, [13 14 15]))
# indexing
pfm[5] = 100
@test pfm[5] == pfm[DNA_A,2] == 100
pfm[2,3] = 200
@test pfm[2,3] == pfm[DNA_C,3] == 200
pfm[DNA_G,1] = 300
@test pfm[3,1] == pfm[DNA_G,1] == 300
@test_throws ArgumentError pfm[DNA_N,1]
# broadcast
@test log.(pfm) isa PFM{DNA,Float64}
@test log.(pfm) == PFM{DNA}(log.(m))
@test pfm .+ 1 isa PFM{DNA,Int}
@test pfm .+ 1 == PFM{DNA}(m .+ 1)
@test pfm .+ 1.1 isa PFM{DNA,Float64}
@test pfm .+ 1.1 == PFM{DNA}(m .+ 1.1)
@test pfm .+ [0,1,2,3] isa PFM{DNA,Int}
@test pfm .+ [0,1,2,3] == PFM{DNA}(m .+ [0,1,2,3])
set = LongDNA{4}.(split(
"""
ACG
ATG
AGG
ACC
"""))
pfm = PFM(set)
@test pfm == [
4 0 0 # A
0 2 1 # C
0 1 3 # G
0 1 0 # T
]
@test pfm == PFM(Set(set))
@test_throws ArgumentError PFM(LongDNA{4}[])
@test_throws ArgumentError PFM(["foo"])
# @test_throws ArgumentError PFM([LongDNA{4}("AA"), LongDNA{4}("AA")])
@test_throws ArgumentError PFM([LongDNA{4}("AA"), LongDNA{4}("AAA")])
end
@testset "PWM" begin
m = [
1 2 3
4 5 6
7 8 9
10 11 12
]
pfm = PFM{DNA}(m)
pwm = PWM(pfm)
raw = log2.((pfm ./ sum(pfm, dims=1)) ./ fill(1/4, 4))
@test pwm isa PWM{DNA,Float64}
@test pwm == raw
@test PWM(pfm, prior=[0.1, 0.4, 0.4, 0.1]) == log2.((pfm ./ sum(pfm, dims=1)) ./ [0.1, 0.4, 0.4, 0.1])
@test PWM(pfm) == PWM{DNA}(raw)
@test maxscore(pwm) ≈ sum(maximum(raw, dims=1))
@test maxscore(PWM{DNA}(zeros(4, 0))) === 0.0
@test PWM(pfm .+ 0.1) isa PWM{DNA,Float64} # pseudo count
@test_throws Exception PWM{DNA}(hcat(m, [13, 14, 15]))
@test_throws Exception PWM(pfm, prior=normalize([0,1,2,3], 1))
@test_throws Exception PWM(pfm, prior=normalize([0,1,2,3], 1).+1e-3)
@test all(pwm[i] === pwm[i] for i in eachindex(pwm))
@test all(pwm[i,j] === raw[i,j] for i in 1:4, j in 1:3)
@test all(pwm[ACGT[i],j] === pwm[i,j] for i in 1:4, j in 1:3)
@test startswith(sprint(show, pwm), string(summary(pwm), ":\n"))
end
@testset "findfirst and findlast" begin
seq = dna"ACGATNATCGCGTANTG"
data = [
1.0 0.1 0.2
0.0 0.2 0.3
0.1 0.2 0.0
0.9 0.5 0.2
]
pwm = PWM{DNA}(data)
@test maxscore(pwm) == 1.8
@test scoreat(seq, pwm, 1) === 1.2
@test findfirst(PWMSearchQuery(pwm, 1.0), seq) === 1
@test findfirst(PWMSearchQuery(pwm, 1.4), seq) === 4
@test findfirst(PWMSearchQuery(pwm, 1.8), seq) === 7
@test findfirst(PWMSearchQuery(pwm, 2.0), seq) === nothing
@test_throws ArgumentError findfirst(PWMSearchQuery(pwm, 1.0), LongRNA{4}(seq))
@test findlast(PWMSearchQuery(pwm, 1.0), seq) == 14
@test findlast(PWMSearchQuery(pwm, 1.4), seq) === 7
@test findlast(PWMSearchQuery(pwm, 1.8), seq) === 7
@test findlast(PWMSearchQuery(pwm, 2.0), seq) === nothing
@test_throws ArgumentError findlast(PWMSearchQuery(pwm, 1.0), LongRNA{4}(seq))
end
end
| BioSequences | https://github.com/BioJulia/BioSequences.jl.git |
|
[
"MIT"
] | 3.1.6 | 6fdba8b4279460fef5674e9aa2dac7ef5be361d5 | code | 5033 | @testset "Regular Expression" begin
# First test the biore syntax works as intended
@test biore"A+[CG]N"d == BioRegex{DNA}("A+[CG]N")
@test biore"A+[CG]N"d != BioRegex{DNA}("A*[CG]N")
@test biore"MV?(I|L)W{3,5}$"a == BioRegex{AminoAcid}("MV?(I|L)W{3,5}\$")
@test biore"MV?(I|L)W{3,5}$"a != BioRegex{AminoAcid}("MV?(I|L)W{3,4}\$")
@test biore"A+UUC*"r == BioRegex{RNA}("A+UUC*")
@test biore"A+UC*"r != BioRegex{RNA}("A+UUC*")
@test isa(biore"A+"d, BioSequences.RE.Regex{DNA})
@test isa(biore"A+"r, BioSequences.RE.Regex{RNA})
@test isa(biore"A+"a, BioSequences.RE.Regex{AminoAcid})
@test isa(biore"A+"dna, BioSequences.RE.Regex{DNA})
@test isa(biore"A+"rna, BioSequences.RE.Regex{RNA})
@test isa(biore"A+"aa, BioSequences.RE.Regex{AminoAcid})
@test_throws Exception eval(:(biore"A+"))
@test_throws Exception eval(:(biore"A+"foo))
@test string(biore"A+"dna) == "biore\"A+\"dna"
@test string(biore"A+"rna) == "biore\"A+\"rna"
@test string(biore"A+"aa) == "biore\"A+\"aa"
@test occursin(biore"A"d, dna"A")
@test !occursin(biore"A"d, dna"C")
@test !occursin(biore"A"d, dna"G")
@test !occursin(biore"A"d, dna"T")
@test occursin(biore"N"d, dna"A")
@test occursin(biore"N"d, dna"C")
@test occursin(biore"N"d, dna"G")
@test occursin(biore"N"d, dna"T")
@test occursin(biore"[AT]"d, dna"A")
@test !occursin(biore"[AT]"d, dna"C")
@test !occursin(biore"[AT]"d, dna"G")
@test occursin(biore"[AT]"d, dna"T")
@test !occursin(biore"[^AT]"d, dna"A")
@test occursin(biore"[^AT]"d, dna"C")
@test occursin(biore"[^AT]"d, dna"G")
@test !occursin(biore"[^AT]"d, dna"T")
re = biore"^A(C+G*)(T{2,})N$"d
@test !occursin(re, dna"AC")
@test !occursin(re, dna"AGTT")
@test !occursin(re, dna"CCGTT")
@test !occursin(re, dna"ACTT")
@test !occursin(re, dna"ACTTGT")
@test occursin(re, dna"ACGTTA")
@test occursin(re, dna"ACGTTT")
@test occursin(re, dna"ACCGGTTT")
@test occursin(re, dna"ACCGGTTT")
@test occursin(re, dna"ACCGGTTTA")
@test occursin(re, dna"ACCGGTTTG")
@test matched(match(re, dna"ACCGTTTTA")) == dna"ACCGTTTTA"
@test captured(match(re, dna"ACCGTTTTA"))[1] == dna"CCG"
@test captured(match(re, dna"ACCGTTTTA"))[2] == dna"TTTT"
# greedy
@test matched(match(biore"A*"d, dna"AAA")) == dna"AAA"
@test matched(match(biore"A+"d, dna"AAA")) == dna"AAA"
@test matched(match(biore"A?"d, dna"AAA")) == dna"A"
@test matched(match(biore"A{2}"d, dna"AAA")) == dna"AA"
@test matched(match(biore"A{2,}"d, dna"AAA")) == dna"AAA"
@test matched(match(biore"A{2,4}"d, dna"AAA")) == dna"AAA"
# lazy
@test matched(match(biore"A*?"d, dna"AAA")) == dna""
@test matched(match(biore"A+?"d, dna"AAA")) == dna"A"
@test matched(match(biore"A??"d, dna"AAA")) == dna""
@test matched(match(biore"A{2}?"d, dna"AAA")) == dna"AA"
@test matched(match(biore"A{2,}?"d, dna"AAA")) == dna"AA"
@test matched(match(biore"A{2,4}?"d, dna"AAA")) == dna"AA"
# search
@test findfirst(biore"A+"d, dna"ACGTAAT") == 1:1
@test findfirst(biore"A+"d, dna"ACGTAAT", 1) == 1:1
@test findfirst(biore"A+"d, dna"ACGTAAT", 2) == 5:6
@test findfirst(biore"A+"d, dna"ACGTAAT", 7) === nothing
# eachmatch
matches = [dna"CG", dna"GC", dna"GC", dna"CG"]
for (i, m) in enumerate(eachmatch(biore"GC|CG"d, dna"ACGTTATGCATGGCG"))
@test matched(m) == matches[i]
end
matches = [dna"CG", dna"GC", dna"GC"]
for (i, m) in enumerate(collect(eachmatch(biore"GC|CG"d, dna"ACGTTATGCATGGCG", false)))
@test matched(m) == matches[i]
end
# matchall
matchall(pat, seq, overlap=true) =
collect(map(matched, eachmatch(pat, seq, overlap)))
@test matchall(biore"A*"d, dna"") == [dna""]
@test matchall(biore"A*"d, dna"AAA") == [
dna"AAA", dna"AA", dna"A", dna"",
dna"AA", dna"A", dna"",
dna"A", dna""]
@test matchall(biore"AC*G*T"d, dna"ACCGGGT") == [dna"ACCGGGT"]
@test matchall(biore"A*"d, dna"", false) == [dna""]
@test matchall(biore"A*"d, dna"AAA", false) == [dna"AAA"]
@test matchall(biore"AC*G*T"d, dna"ACCGGGT", false) == [dna"ACCGGGT"]
# RNA and Amino acid
@test occursin(biore"U(A[AG]|GA)$"r, rna"AUUGUAUGA")
@test !occursin(biore"U(A[AG]|GA)$"r, rna"AUUGUAUGG")
@test occursin(biore"T+[NQ]A?P"a, aa"MTTQAPMFTQPL")
@test occursin(biore"T+[NQ]A?P"a, aa"MTTAAPMFTQPL")
@test !occursin(biore"T+[NQ]A?P"a, aa"MTTAAPMFSQPL")
# PROSITE
@test occursin(prosite"[AC]-x-V-x(4)-{ED}", aa"ADVAARRK")
@test occursin(prosite"[AC]-x-V-x(4)-{ED}", aa"CPVAARRK")
@test !occursin(prosite"[AC]-x-V-x(4)-{ED}", aa"ADVAARRE")
@test !occursin(prosite"[AC]-x-V-x(4)-{ED}", aa"CPVAARK")
@test occursin(prosite"<[AC]-x-V-x(4)-{ED}>", aa"ADVAARRK")
@test !occursin(prosite"<[AC]-x-V-x(4)-{ED}>", aa"AADVAARRK")
@test !occursin(prosite"<[AC]-x-V-x(4)-{ED}>", aa"ADVAARRKA")
end
| BioSequences | https://github.com/BioJulia/BioSequences.jl.git |
|
[
"MIT"
] | 3.1.6 | 6fdba8b4279460fef5674e9aa2dac7ef5be361d5 | docs | 8441 | # Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
## [UNRELEASED]
* Relax requirement of `decode`, such that it no longer needs to check for
invalid data. Note that this change is not breaking, since it is not possible
for correctly-implemented `Alphabet` and `BioSequence` to store invalid data.
## [3.1.6]
* The heuristics for translating sequences with ambiguous symbols is now improved.
Now, `translate` does not rely on heuristics but uses an algorithm that always
returns exactly the right amino acid in the face of ambiguous nucleotides.
## [3.1.5]
* Attempting to translate a nucleotide sequence with gap symbols now throws an error (#278, see #277)
## [3.1.4]
* Migrate from SnoopPrecompile to PrecompileTools (#273)
## [3.1.3]
* Improve error when mis-encoding `LongDNA` from byte-like inputs (#267)
* Remove references to internal `Random.GLOBAL_RNG` (#265)
## [3.1.2]
* Fix bug in converting `LongSubSeq` to `LongSequence` (#261)
## [3.1.1]
* Add `iterate` method for `Alphabets` (#233)
* Add SnoopPrecompile workload and dependency on SnoopPrecompile (#257)
## [3.1.0]
### Added
* Add `rand!([::AbstractRNG], ::LongSequence, [::Sampler])` methods
## [3.0.2]
### Added
* It is now possible to `join` BioSymbols into a BioSequence.
* Add `findall` methods to `BioSequence`
## [3.0.1]
Release has been yanked from General Registry
## [3.0]
### Removed
- Removed `unsafe_setindex!`. Instead, use normal setindex with `@inbounds`.
- Removed minhashing functionality - see package MinHash.jl
- Removed composition functionality - see package Kmers.jl
- Removed ReferenceSequence functionality
- Removed demultiplexer functionality
- Removed kmer functionality - this is moved to Kmers.jl
- Removed VoidAlphabet and CharAlphabet
- Removed ConditionIterator
### Added
- Added type `LongSubSeq`, a view into a `LongSequence`.
- Added method `translate!(::LongAminoAcidSeq, ::LongNucleotideSeq; kwargs...)`
- Added method `join(::Type{T<:BioSequence}, it)` to join an iterable of biosequences
to a new instance of T.
- Added method `join!(s::BioSequence, it)`, an in-place version of `join`
### Changed
- `LongSequence` is no longer copy-on-write. For views, use `LongSubSeq`.
- Renamed `LongAminoAcidSeq` -> `LongAA`, `LongNucleotideSeq` -> `LongNuc`
`LongRNASeq` -> `LongRNA` and `LongDNASeq` -> `LongDNA`
- The interface for `Alphabet` and `BioSequence` is now more clearly defined, documented, and tested.
- The constructor `LongSequence{A}(::Integer)` has been removed in favor of `LongSequence{A}(undef, ::Integer)`.
- Biological sequences can no longer be converted to/from strings and vectors.
- Updated the element and substring search API to conform to `Base.find*` patterns.
## [2.0.1]
### Changed
- Fixed syntax errors where functions were marked with `@inbounds` instead of
`@inline`.
## [2.0]
### Added
- New subtypes of Random.Sampler, SamplerUniform and SamplerWeighted.
- Random `LongSequence`s can now be created with `randseq`,
optionally using a sampler to specify element distribution.
- All random `LongSequence` generator methods take an optional AbstractRNG
argument.
- Add methods to `randseq` to optimize random generation of `NucleicAcid` or
`AminoAcid` `LongSequence`s.
- BioGenerics is now a dependency - replaces BioCore.
- A `SkipmerFactory` iterator that allows iteration over the Skipmers in a
nucleotide sequence. A Skipmer is a `Mer` (see changed below), that is
generated using a certain cyclic nucleotide sampling pattern.
See [this paper](https://www.biorxiv.org/content/early/2017/09/19/179960.full.pdf+html)
for more details.
- A `BigMer` parametric primitive type has been added, that has the same
functionality as `Mer` (see changed section), but uses 128 bits instead of 64.
- An abstract parametric type called `AbstractMer` has been added to unify `Mer`
and `BigMer`.
- Generators of bit-parallel iteration code have been introduced to help
developers write bitparallel implementations of some methods. Counting GC
content, matches and mismatches have been migrated to use these generators.
- Added `occursin` methods for exact matching.
### Changed
- The abstract `Sequence` type is now called `BioSequence{A}`.
- The type previously called `BioSequence{A}` is now `LongSequence{A}`.
- `Kmers` are now a parametric primitive type: `Mer{A<:NucleicAcidAlphabet{2},K}`.
- `unsafe_setindex!` has been made systematic for all `setindex` methods as a
way of bypassing all bound checking and `orphan!` calls.
- Kmer string literals have been updated, they are now `mer""` string literals,
and they have a flag to enforce the type of `Mer` e.g.: `mer"ATCG"dna`,
`mer"AUCG"rna`
- No longer use an old version of Twiddle and deprecated functions.
- Using `Base.count` with certain functions and sequence combinations dispatches
to highly optimized bit-parallel implementations, falling back to a default
naive counting loop by default for all other predicate-sequence combinations.
- No more implicit conversion from strings to biological sequences. The `Base.convert`
methods have been renamed to `Base.parse` methods.
### Removed
- The FASTQ module.
- The FASTA module.
- The TwoBit module.
- The ABIF module.
- BioCore is no longer a dependency.
- Automa is no longer a dependency.
## [1.1.0]
### Changed
- Automatic conversion of `LongDNASeq` to `LongRNASeq` when translating
sequences.
- Add `alternative_start` keyword argument to translate().
- Add abstract type for kmer iterators.
- :racehorse: Faster kmer iteration.
- Fixed indexing in ABIF records.
## [1.0.0] - 2018-08-23
### Added
- Issue and PR templates.
- Code of Conduct and Contributing files.
- A changelog file.
- Support for julia v0.7 and v1.0.
### Removed
- :exclamation: Support for julia v0.6.
## [0.8.3] - 2018-02-28
### Changed
- Fix the `sequence` method so as the sequence type can be specified, allowing
type-stable efficient code generation.
## [0.8.2] - 2018-02-19
### Changed
- A bug fix for `FASTA.Record` writing where the width parameter of a
`FASTA.Writer` is less than or equal to zero.
## [0.8.1] - 2017-11-10
### Changed
- Update documentation generation.
- Fixes to type definition keywords.
- Bit-parallel GC counting.
## [0.8.0] - 2017-08-16
### Added
- Position weight matrix search functionality.
- A generalised composition method.
- `typemin` and `typemax` methods for `Kmer` types.
### Changed
- `MinHash` function now generalised to `Reader` types.
- Updates to doc tests.
## [0.7.0] - 2017-07-28
### Added
- Support for julia v0.6 only.
### Removed
- :exclamation: Dropped support for julia v0.5.
## [0.6.3] - 2017-07-06
### Changed
- Iterators.jl is no longer used as a dependency in favour of IterTools.jl.
## [0.6.1] - 2017-06-20
### Changed
- Bug-fix for site-counting algorithm.
## [0.6.0] - 2017-06-14
### Added
- :arrow_up: Compatibility with julia v0.6.
- The `ungap` and `ungap!` methods, that are shorthand for filtering gaps from
biological sequences.
### Changed
- Bug fixes for Kmer iteration that were caused by gaps in 4-bit encoded sequences.
## [0.5.0] - 2017-06-07
### Added
- All files pertaining to the old Bio.Seq module.
[Unreleased]: https://github.com/BioJulia/BioSequences.jl/compare/v2.0.1...HEAD
[2.0.1]: https://github.com/BioJulia/BioSequences.jl/compare/v2.0.0...v2.0.1
[2.0.0]: https://github.com/BioJulia/BioSequences.jl/compare/v1.1.0...v2.0.0
[1.1.0]: https://github.com/BioJulia/BioSequences.jl/compare/v1.0.0...v1.1.0
[1.0.0]: https://github.com/BioJulia/BioSequences.jl/compare/v0.8.3...v1.0.0
[0.8.3]: https://github.com/BioJulia/BioSequences.jl/compare/v0.8.2...v0.8.3
[0.8.2]: https://github.com/BioJulia/BioSequences.jl/compare/v0.8.1...v0.8.2
[0.8.1]: https://github.com/BioJulia/BioSequences.jl/compare/v0.8.0...v0.8.1
[0.8.0]: https://github.com/BioJulia/BioSequences.jl/compare/v0.7.0...v0.8.0
[0.7.0]: https://github.com/BioJulia/BioSequences.jl/compare/v0.6.3...v0.7.0
[0.6.3]: https://github.com/BioJulia/BioSequences.jl/compare/v0.6.1...v0.6.3
[0.6.1]: https://github.com/BioJulia/BioSequences.jl/compare/v0.6.0...v0.6.1
[0.6.0]: https://github.com/BioJulia/BioSequences.jl/compare/v0.5.0...v0.6.0
[0.5.0]: https://github.com/BioJulia/BioSequences.jl/tree/v0.5.0
| BioSequences | https://github.com/BioJulia/BioSequences.jl.git |
|
[
"MIT"
] | 3.1.6 | 6fdba8b4279460fef5674e9aa2dac7ef5be361d5 | docs | 3807 | # <img src="./sticker.svg" width="30%" align="right" /> BioSequences
[](https://github.com/BioJulia/BioSequences.jl/releases/latest)
[](https://github.com/BioJulia/BioSequences.jl/blob/master/LICENSE)
[](https://biojulia.github.io/BioSequences.jl/stable)
[](https://www.repostatus.org/#active)
[](https://gitter.im/BioJulia/BioSequences.jl)
## Description
BioSequences provides data types and methods for common operations with
biological sequences, including DNA, RNA, and amino acid sequences.
## Installation
You can install BioSequences from the julia
REPL. Press `]` to enter pkg mode, and enter the following:
```julia
add BioSequences
```
If you are interested in the cutting edge of the development, please check out
the master branch to try new features before release.
## Testing
BioSequences is tested against Julia `1.X` on Linux, OS X, and Windows.
[](https://github.com/BioJulia/BioSequences.jl/actions?query=workflow%3A%22Unit+tests%22+branch%3Amaster)
[](https://github.com/BioJulia/BioSequences.jl/actions?query=workflow%3ADocumentation+branch%3Amaster)
[](https://codecov.io/gh/BioJulia/BioSequences.jl)
[](https://github.com/BioJulia/BioSequences.jl/actions/workflows/Downstream.yml)
## Contributing
We appreciate contributions from users including reporting bugs, fixing
issues, improving performance and adding new features.
Take a look at the [contributing files](https://github.com/BioJulia/Contributing)
detailed contributor and maintainer guidelines, and code of conduct.
## Backers & Sponsors
Thank you to all our backers and sponsors!
[](https://opencollective.com/biojulia/sponsor/0/website)
[](https://opencollective.com/biojulia/sponsor/1/website)
[](https://opencollective.com/biojulia/sponsor/2/website)
[](https://opencollective.com/biojulia/sponsor/3/website)
[](https://opencollective.com/biojulia/sponsor/4/website)
[](https://opencollective.com/biojulia/sponsor/5/website)
[](https://opencollective.com/biojulia/sponsor/6/website)
[](https://opencollective.com/biojulia/sponsor/7/website)
[](https://opencollective.com/biojulia/sponsor/8/website)
[](https://opencollective.com/biojulia/sponsor/9/website)
## Questions?
If you have a question about contributing or using BioJulia software, come
on over and chat to us on [the Julia Slack workspace](https://julialang.org/slack/), or you can try the
[Bio category of the Julia discourse site](https://discourse.julialang.org/c/domain/bio).
| BioSequences | https://github.com/BioJulia/BioSequences.jl.git |
|
[
"MIT"
] | 3.1.6 | 6fdba8b4279460fef5674e9aa2dac7ef5be361d5 | docs | 1874 | <!--- Provide a general summary of the issue in the Title above -->
> _This template is rather extensive. Fill out all that you can, if are a new contributor or you're unsure about any section, leave it unchanged and a reviewer will help you_ :smile:. _This template is simply a tool to help everyone remember the BioJulia guidelines, if you feel anything in this template is not relevant, simply delete it._
## Expected Behavior
<!--- If you're describing a bug, tell us what you expect to happen -->
<!--- If you're suggesting a change/improvement, tell us how it should work -->
## Current Behavior
<!--- If describing a bug, tell us what happens instead of the expected behavior -->
<!--- If suggesting a change/improvement, explain the difference from current behavior -->
## Possible Solution / Implementation
<!--- If describing a bug, suggest a fix/reason for the bug (optional) -->
<!--- If you're suggesting a change/improvement, suggest ideas how to implement the addition or change -->
## Steps to Reproduce (for bugs)
<!--- You may include copy/pasteable snippets or a list of steps to reproduce the bug -->
1.
2.
3.
4.
<!--- Optionally, provide a link to a live example -->
<!--- You can use [this tool](https://www.cockos.com/licecap/) -->
<!--- ...Or [this tool](https://github.com/colinkeenan/silentcast) -->
<!--- ...Or [this tool](https://github.com/GNOME/byzanz) on Linux -->
## Context
<!--- How has this issue affected you? What are you trying to accomplish? -->
<!--- Providing context helps us come up with a solution that is most useful in the real world -->
## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
- Package Version used:
- Julia Version used:
- Operating System and version (desktop or mobile):
- Link to your project:
<!-- Can you list installed packages here? -->
| BioSequences | https://github.com/BioJulia/BioSequences.jl.git |
|
[
"MIT"
] | 3.1.6 | 6fdba8b4279460fef5674e9aa2dac7ef5be361d5 | docs | 2783 | # A clear and descriptive title (No issue numbers please)
> _This template is rather extensive. Fill out all that you can, if are a new contributor or you're unsure about any section, leave it unchanged and a reviewer will help you_ :smile:. _This template is simply a tool to help everyone remember the BioJulia guidelines, if you feel anything in this template is not relevant, simply delete it._
## Types of changes
This PR implements the following changes:
_(Please tick any or all of the following that are applicable)_
* [ ] :sparkles: New feature (A non-breaking change which adds functionality).
* [ ] :bug: Bug fix (A non-breaking change, which fixes an issue).
* [ ] :boom: Breaking change (fix or feature that would cause existing functionality to change).
## :clipboard: Additional detail
- If you have implemented new features or behaviour
- **Provide a description of the addition** in as many details as possible.
- **Provide justification of the addition**.
- **Provide a runnable example of use of your addition**. This lets reviewers
and others try out the feature before it is merged or makes it's way to release.
- If you have changed current behaviour...
- **Describe the behaviour prior to you changes**
- **Describe the behaviour after your changes** and justify why you have made the changes,
Please describe any breakages you anticipate as a result of these changes.
- **Does your change alter APIs or existing exposed methods/types?**
If so, this may cause dependency issues and breakages, so the maintainer
will need to consider this when versioning the next release.
- If you are implementing changes that are intended to increase performance, you
should provide the results of a simple performance benchmark exercise
demonstrating the improvement. Especially if the changes make code less legible.
## :ballot_box_with_check: Checklist
- [ ] :art: The changes implemented is consistent with the [julia style guide](https://docs.julialang.org/en/v1/manual/style-guide/).
- [ ] :blue_book: I have updated and added relevant docstrings, in a manner consistent with the [documentation styleguide](https://docs.julialang.org/en/v1/manual/documentation/).
- [ ] :blue_book: I have added or updated relevant user and developer manuals/documentation in `docs/src/`.
- [ ] :ok: There are unit tests that cover the code changes I have made.
- [ ] :ok: The unit tests cover my code changes AND they pass.
- [ ] :pencil: I have added an entry to the `[UNRELEASED]` section of the manually curated `CHANGELOG.md` file for this repository.
- [ ] :ok: All changes should be compatible with the latest stable version of Julia.
- [ ] :thought_balloon: I have commented liberally for any complex pieces of internal code.
| BioSequences | https://github.com/BioJulia/BioSequences.jl.git |
|
[
"MIT"
] | 3.1.6 | 6fdba8b4279460fef5674e9aa2dac7ef5be361d5 | docs | 6746 | ```@meta
CurrentModule = BioSequences
DocTestSetup = quote
using BioSequences
end
```
# Construction & conversion
Here we will showcase the various ways you can construct the various sequence
types in BioSequences.
## Constructing sequences
### From strings
Sequences can be constructed from strings using their constructors:
```jldoctest
julia> LongDNA{4}("TTANC")
5nt DNA Sequence:
TTANC
julia> LongSequence{DNAAlphabet{2}}("TTAGC")
5nt DNA Sequence:
TTAGC
julia> LongRNA{4}("UUANC")
5nt RNA Sequence:
UUANC
julia> LongSequence{RNAAlphabet{2}}("UUAGC")
5nt RNA Sequence:
UUAGC
```
Type alias' can also be used for brevity.
```jldoctest
julia> LongDNA{4}("TTANC")
5nt DNA Sequence:
TTANC
julia> LongDNA{2}("TTAGC")
5nt DNA Sequence:
TTAGC
julia> LongRNA{4}("UUANC")
5nt RNA Sequence:
UUANC
julia> LongRNA{2}("UUAGC")
5nt RNA Sequence:
UUAGC
```
### Constructing sequences from arrays of BioSymbols
Sequences can be constructed using vectors or arrays of a `BioSymbol` type:
```jldoctest
julia> LongDNA{4}([DNA_T, DNA_T, DNA_A, DNA_N, DNA_C])
5nt DNA Sequence:
TTANC
julia> LongSequence{DNAAlphabet{2}}([DNA_T, DNA_T, DNA_A, DNA_G, DNA_C])
5nt DNA Sequence:
TTAGC
```
### Constructing sequences from other sequences
You can create sequences, by concatenating other sequences together:
```jldoctest
julia> LongDNA{2}("ACGT") * LongDNA{2}("TGCA")
8nt DNA Sequence:
ACGTTGCA
julia> repeat(LongDNA{4}("TA"), 10)
20nt DNA Sequence:
TATATATATATATATATATA
julia> LongDNA{4}("TA") ^ 10
20nt DNA Sequence:
TATATATATATATATATATA
```
Sequence views (`LongSubSeq`s) are special, in that they do not own their own data,
and must be constructed from a `LongSequence` or another `LongSubSeq`:
```jldoctest
julia> seq = LongDNA{4}("TACGGACATTA")
11nt DNA Sequence:
TACGGACATTA
julia> seqview = LongSubSeq(seq, 3:7)
5nt DNA Sequence:
CGGAC
julia> seqview2 = @view seq[1:3]
3nt DNA Sequence:
TAC
julia> typeof(seqview) == typeof(seqview2) && typeof(seqview) <: LongSubSeq
true
```
## Conversion of sequence types
You can convert between sequence types, if the sequences are compatible - that is, if the source sequence does not contain symbols that are un-encodable by the destination type.
```jldoctest
julia> dna = dna"TTACGTAGACCG"
12nt DNA Sequence:
TTACGTAGACCG
julia> dna2 = convert(LongDNA{2}, dna)
12nt DNA Sequence:
TTACGTAGACCG
```
DNA/RNA are special in that they can be converted to each other, despite containing distinct symbols.
When doing so, `DNA_T` is converted to `RNA_U` and vice versa.
```jldoctest
julia> convert(LongRNA{2}, dna"TAGCTAGG")
8nt RNA Sequence:
UAGCUAGG
```
## String literals
BioSequences provides several string literal macros for creating sequences.
!!! note
When you use literals you may mix the case of characters.
### Long sequence literals
```jldoctest
julia> dna"TACGTANNATC"
11nt DNA Sequence:
TACGTANNATC
julia> rna"AUUUGNCCANU"
11nt RNA Sequence:
AUUUGNCCANU
julia> aa"ARNDCQEGHILKMFPSTWYVX"
21aa Amino Acid Sequence:
ARNDCQEGHILKMFPSTWYVX
```
However, it should be noted that by default these sequence literals
allocate the `LongSequence` object before the code containing the sequence
literal is run.
This means there may be occasions where your program does not behave as you
first expect.
For example consider the following code:
```jldoctest
julia> function foo()
s = dna"CTT"
push!(s, DNA_A)
end
foo (generic function with 1 method)
```
```@meta
DocTestSetup = quote
using BioSequences
function foo()
s = dna"CTT"d
push!(s, DNA_A)
end
end
```
You might expect that every time you call `foo`, that a DNA sequence `CTTA` would
be returned. You might expect that this is because every time `foo` is called,
a new DNA sequence variable `CTT` is created, and the `A` nucleotide is pushed
to it, and the result, `CTTA` is returned.
In other words you might expect the following output:
```jldoctest
julia> foo()
4nt DNA Sequence:
CTTA
julia> foo()
4nt DNA Sequence:
CTTA
julia> foo()
4nt DNA Sequence:
CTTA
```
However, this is not what happens, instead the following happens:
```@meta
DocTestSetup = quote
using BioSequences
function foo()
s = dna"CTT"s
push!(s, DNA_A)
end
end
```
```jldoctest
julia> foo()
4nt DNA Sequence:
CTTA
julia> foo()
5nt DNA Sequence:
CTTAA
julia> foo()
6nt DNA Sequence:
CTTAAA
```
The reason for this is because the sequence literal is allocated only once
before the first time the function `foo` is called and run. Therefore, `s` in
`foo` is always a reference to that one sequence that was allocated.
So one sequence is created before `foo` is called, and then it is pushed to
every time `foo` is called. Thus, that one allocated sequence grows with every
call of `foo`.
If you wanted `foo` to create a new sequence each time it is called,
then you can add a flag to the end of the sequence literal to dictate behaviour:
A flag of 's' means 'static': the sequence will be allocated before code is run,
as is the default behaviour described above.
However providing 'd' flag changes the behaviour: 'd' means 'dynamic':
the sequence will be allocated whilst the code is running, and not before.
So to change `foo` so as it creates a new sequence
each time it is called, simply add the 'd' flag to the sequence literal:
```@meta
DocTestSetup = quote
using BioSequences
end
```
```jldoctest
julia> function foo()
s = dna"CTT"d # 'd' flag appended to the string literal.
push!(s, DNA_A)
end
foo (generic function with 1 method)
```
Now every time `foo` is called, a new sequence `CTT` is created, and an `A`
nucleotide is pushed to it:
```@meta
DocTestSetup = quote
using BioSequences
function foo()
s = dna"CTT"d
push!(s, DNA_A)
end
end
```
```jldoctest
julia> foo()
4nt DNA Sequence:
CTTA
julia> foo()
4nt DNA Sequence:
CTTA
julia> foo()
4nt DNA Sequence:
CTTA
```
```@meta
DocTestSetup = quote
using BioSequences
end
```
So the take home message of sequence literals is this:
Be careful when you are using sequence literals inside of functions, and inside
the bodies of things like for loops. And if you use them and are unsure, use the
's' and 'd' flags to ensure the behaviour you get is the behaviour you intend.
## Comparison to other sequence types
Following Base standards, BioSequences do not compare equal to other containers even if they have the same elements.
To e.g. compare a BioSequence with a vector of DNA, compare the elements themselves:
```jldoctest
julia> seq = dna"GAGCTGA"; vec = collect(seq);
julia> seq == vec, isequal(seq, vec)
(false, false)
julia> length(seq) == length(vec) && all(i == j for (i, j) in zip(seq, vec))
true
```
| BioSequences | https://github.com/BioJulia/BioSequences.jl.git |
|
[
"MIT"
] | 3.1.6 | 6fdba8b4279460fef5674e9aa2dac7ef5be361d5 | docs | 2215 | ```@meta
CurrentModule = BioSequences
DocTestSetup = quote
using BioSequences
end
```
# Counting
BioSequences extends the `Base.count` method to provide some useful utilities for
counting the number of sites in biological sequences.
Most generically you can count the number of sites that satisfy some condition
i.e. cause some function to return `true`:
```jldoctest
julia> count(isambiguous, dna"ATCGM")
1
```
You can also use two sequences, for example to compute the number of matching
or mismatching symbols:
```jldoctest
julia> count(!=, dna"ATCGM", dna"GCCGM")
2
julia> count(==, dna"ATCGM", dna"GCCGM")
3
```
## Alias functions
A number of functions which are aliases for various invocations of `Base.count`
are provided.
| Alias function | Base.count call(s) |
| :------------- | :---------------------------------------------------------- |
| `n_ambiguous` | `count(isambiguous, seq)`, `count(isambiguous, seqa, seqb)` |
| `n_certain` | `count(iscertain, seq)`, `count(iscertain, seqa, seqb)` |
| `n_gap` | `count(isgap, seq)`, `count(isgap, seqa, seqb)` |
| `matches` | `count(==, seqa, seqb)` |
| `mismatches` | `count(!=, seqa, seqb)` |
## Bit-parallel optimisations
For the vast majority of `Base.count(f, seq)` and `Base.count(f, seqa, seqb)`
methods, a naive counting is done: the internal `count_naive` function is called,
which simply loops over each position, applies `f`, and accumulates the result.
However, for some functions, it is possible to implement highly efficient methods
that use bit-parallelism to check many elements at one time.
This is made possible by the succinct encoding of BioSequences.
Usually `f` is one of the functions provided by BioSymbols.jl or by BioSequences.jl
For such sequence and function combinations, `Base.count(f, seq)` is overloaded
to call an internal `BioSequences.count_*_bitpar` function, which is passed the
sequence(s). If you want to force BioSequences to use naive counting for the
purposes of testing or debugging for example, then you can call
`BioSequences.count_naive` directly. | BioSequences | https://github.com/BioJulia/BioSequences.jl.git |
|
[
"MIT"
] | 3.1.6 | 6fdba8b4279460fef5674e9aa2dac7ef5be361d5 | docs | 4371 | # BioSequences
[](https://github.com/BioJulia/BioSequences.jl/releases/latest)
[](https://github.com/BioJulia/BioSequences.jl/blob/master/LICENSE)
[](https://biojulia.github.io/BioSequences.jl/stable)
[](https://www.repostatus.org/#active)
[](https://gitter.im/BioJulia/BioSequences.jl)
## Description
BioSequences provides data types and methods for common operations with
biological sequences, including DNA, RNA, and amino acid sequences.
## Installation
You can install BioSequences from the julia
REPL. Press `]` to enter pkg mode, and enter the following:
```julia
add BioSequences
```
If you are interested in the cutting edge of the development, please check out
the master branch to try new features before release.
## Testing
BioSequences is tested against Julia `1.X` on Linux, OS X, and Windows.
[](https://github.com/BioJulia/BioSequences.jl/actions?query=workflow%3A%22Unit+tests%22+branch%3Amaster)
[](https://github.com/BioJulia/BioSequences.jl/actions?query=workflow%3ADocumentation+branch%3Amaster)
[](https://codecov.io/gh/BioJulia/BioSequences.jl)
## Contributing
We appreciate contributions from users including reporting bugs, fixing
issues, improving performance and adding new features.
Take a look at the [contributing files](https://github.com/BioJulia/Contributing)
detailed contributor and maintainer guidelines, and code of conduct.
### Financial contributions
We also welcome financial contributions in full transparency on our
[open collective](https://opencollective.com/biojulia).
Anyone can file an expense. If the expense makes sense for the development
of the community, it will be "merged" in the ledger of our open collective by
the core contributors and the person who filed the expense will be reimbursed.
## Backers & Sponsors
Thank you to all our backers and sponsors!
Love our work and community? [Become a backer](https://opencollective.com/biojulia#backer).
[](https://opencollective.com/biojulia#backers)
Does your company use BioJulia? Help keep BioJulia feature rich and healthy by
[sponsoring the project](https://opencollective.com/biojulia#sponsor)
Your logo will show up here with a link to your website.
[](https://opencollective.com/biojulia/sponsor/0/website)
[](https://opencollective.com/biojulia/sponsor/1/website)
[](https://opencollective.com/biojulia/sponsor/2/website)
[](https://opencollective.com/biojulia/sponsor/3/website)
[](https://opencollective.com/biojulia/sponsor/4/website)
[](https://opencollective.com/biojulia/sponsor/5/website)
[](https://opencollective.com/biojulia/sponsor/6/website)
[](https://opencollective.com/biojulia/sponsor/7/website)
[](https://opencollective.com/biojulia/sponsor/8/website)
[](https://opencollective.com/biojulia/sponsor/9/website)
## Questions?
If you have a question about contributing or using BioJulia software, come
on over and chat to us on [Gitter](https://gitter.im/BioJulia/General), or you can try the
[Bio category of the Julia discourse site](https://discourse.julialang.org/c/domain/bio).
| BioSequences | https://github.com/BioJulia/BioSequences.jl.git |
|
[
"MIT"
] | 3.1.6 | 6fdba8b4279460fef5674e9aa2dac7ef5be361d5 | docs | 3908 | ```@meta
CurrentModule = BioSequences
DocTestSetup = quote
using BioSequences
end
```
# Custom BioSequences types
If you're a developing your own Bioinformatics package or method, you may find
that the reference implementation of concrete `LongSequence` types provided in
this package are not optimal for your purposes.
This page describes the interfaces for BioSequences' core types for
developers or other packages implementing their own sequence types or extending
BioSequences functionality.
## Implementing custom Alphabets
Recall the required methods that define the [`Alphabet`](@ref) interface.
To create an example custom alphabet, we need to create a singleton type, that
implements a few methods in order to conform to the interface as described in the
[`Alphabet`](@ref) documentation.
Let's do that for a restricted Amino Acid alphabet. We can test that it conforms
to the interface with the [`BioSequences.has_interface`](@ref) function.
```jldoctest
julia> struct ReducedAAAlphabet <: Alphabet end
julia> Base.eltype(::Type{ReducedAAAlphabet}) = AminoAcid
julia> BioSequences.BitsPerSymbol(::ReducedAAAlphabet) = BioSequences.BitsPerSymbol{4}()
julia> function BioSequences.symbols(::ReducedAAAlphabet)
(AA_L, AA_C, AA_A, AA_G, AA_S, AA_T, AA_P, AA_F,
AA_W, AA_E, AA_D, AA_N, AA_Q, AA_K, AA_H, AA_M)
end
julia> const (ENC_LUT, DEC_LUT) = let
enc_lut = fill(0xff, length(alphabet(AminoAcid)))
dec_lut = fill(AA_A, length(symbols(ReducedAAAlphabet())))
for (i, aa) in enumerate(symbols(ReducedAAAlphabet()))
enc_lut[reinterpret(UInt8, aa) + 0x01] = i - 1
dec_lut[i] = aa
end
(Tuple(enc_lut), Tuple(dec_lut))
end
((0x02, 0xff, 0x0b, 0x0a, 0x01, 0x0c, 0x09, 0x03, 0x0e, 0xff, 0x00, 0x0d, 0x0f, 0x07, 0x06, 0x04, 0x05, 0x08, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff), (AA_L, AA_C, AA_A, AA_G, AA_S, AA_T, AA_P, AA_F, AA_W, AA_E, AA_D, AA_N, AA_Q, AA_K, AA_H, AA_M))
julia> function BioSequences.encode(::ReducedAAAlphabet, aa::AminoAcid)
i = reinterpret(UInt8, aa) + 0x01
(i ≥ length(ENC_LUT) || @inbounds ENC_LUT[i] === 0xff) && throw(DomainError(aa))
(@inbounds ENC_LUT[i]) % UInt
end
julia> function BioSequences.decode(::ReducedAAAlphabet, x::UInt)
           x ≥ length(DEC_LUT) && throw(DomainError(x))
           @inbounds DEC_LUT[x + UInt(1)]
       end
julia> BioSequences.has_interface(Alphabet, ReducedAAAlphabet())
true
```
## Implementing custom BioSequences
Recall the required methods that define the [`BioSequence`](@ref) interface.
To create an example custom alphabet, we need to create a singleton type, that
implements a few methods in order to conform to the interface as described in the
[`BioSequence`](@ref) documentation.
Let's do that for a custom sequence type that is optimised to represent a small
sequence: A Codon. We can test that it conforms to the interface with the
[`BioSequences.has_interface`](@ref) function.
```jldoctest
julia> struct Codon <: BioSequence{RNAAlphabet{2}}
x::UInt8
end
julia> function Codon(iterable)
length(iterable) == 3 || error("Must have length 3")
x = zero(UInt)
for (i, nt) in enumerate(iterable)
x |= BioSequences.encode(Alphabet(Codon), convert(RNA, nt)) << (6-2i)
end
Codon(x % UInt8)
end
Codon
julia> Base.length(::Codon) = 3
julia> BioSequences.encoded_data_eltype(::Type{Codon}) = UInt
julia> function BioSequences.extract_encoded_element(x::Codon, i::Int)
((x.x >>> (6-2i)) & 3) % UInt
end
julia> Base.copy(seq::Codon) = Codon(seq.x)
julia> BioSequences.has_interface(BioSequence, Codon, [RNA_C, RNA_U, RNA_A], false)
true
```
## Interface checking functions
```@docs
BioSequences.has_interface
```
| BioSequences | https://github.com/BioJulia/BioSequences.jl.git |
|
[
"MIT"
] | 3.1.6 | 6fdba8b4279460fef5674e9aa2dac7ef5be361d5 | docs | 797 | # I/O for sequencing file formats
Versions of BioSequences prior to v2.0 provided a FASTA, FASTQ, and 2Bit
submodule for working with formatted sequence files.
After version v2.0, in order to neatly separate concerns, these submodules were
removed.
Instead there will now be dedicated BioJulia packages for each format. Each
of these will be compatible with BioSequences.
A list of all of the different formats and packages is provided below to help
you find them quickly.
| Format | Package |
|:------ |:------------------------------------------------ |
| FASTA | [FASTX.jl](https://github.com/BioJulia/FASTX.jl) |
| FASTQ | [FASTX.jl](https://github.com/BioJulia/FASTX.jl) |
| 2Bit | [TwoBit.jl](https://github.com/BioJulia/TwoBit.jl) | | BioSequences | https://github.com/BioJulia/BioSequences.jl.git |
|
[
"MIT"
] | 3.1.6 | 6fdba8b4279460fef5674e9aa2dac7ef5be361d5 | docs | 332 | ```@meta
CurrentModule = BioSequences
DocTestSetup = quote
using BioSequences
end
```
# Iteration
As you might expect, sequence types are iterators over their elements:
```jldoctest
julia> n = 0
0
julia> for nt in dna"ATNGNNT"
if nt == DNA_N
global n += 1
end
end
julia> n
3
``` | BioSequences | https://github.com/BioJulia/BioSequences.jl.git |
|
[
"MIT"
] | 3.1.6 | 6fdba8b4279460fef5674e9aa2dac7ef5be361d5 | docs | 300 | ```@meta
CurrentModule = BioSequences
DocTestSetup = quote
using BioSequences
end
```
# Predicates
A number of predicate or query functions are supported for sequences, allowing
you to check for certain properties of a sequence.
```@docs
isrepetitive
ispalindromic
hasambiguity
iscanonical
``` | BioSequences | https://github.com/BioJulia/BioSequences.jl.git |
|
[
"MIT"
] | 3.1.6 | 6fdba8b4279460fef5674e9aa2dac7ef5be361d5 | docs | 341 | ```@meta
CurrentModule = BioSequences
DocTestSetup = quote
using BioSequences
end
```
# Generating random sequences
## Long sequences
You can generate random long sequences using the `randseq` and `randdnaseq` functions and the
`Sampler`s implemented in BioSequences:
```@docs
randseq
randdnaseq
randrnaseq
randaaseq
SamplerUniform
SamplerWeighted
``` | BioSequences | https://github.com/BioJulia/BioSequences.jl.git |
|
[
"MIT"
] | 3.1.6 | 6fdba8b4279460fef5674e9aa2dac7ef5be361d5 | docs | 7452 | ```@meta
CurrentModule = BioSequences
DocTestSetup = quote
using BioSequences
end
```
# Searching for sequence motifs
There are many ways to search for particular motifs in biological sequences:
1. Exact searches, where you are looking for exact matches of a particular
character of substring.
2. Approximate searches, where you are looking for sequences that are
sufficiently similar to a given sequence or family of sequences.
3. Searches where you are looking for sequences that conform to some sort of
pattern.
Like other Julia sequences such as `Vector`, you can search a `BioSequence` with the `findfirst(predicate, collection)` method pattern.
All these kinds of searches are provided in BioSequences.jl, and they all
conform to the `findnext`, `findprev`, and `occursin` patterns established in `Base` for
`String` and collections like `Vector`.
The exception is searching using the specialised
regex provided in this package, which as you shall see, conforms to the `match`
pattern established in `Base` for pcre and `String`s.
## Symbol search
```jldoctest
julia> seq = dna"ACAGCGTAGCT";
julia> findfirst(DNA_A, seq)
1
julia> findlast(DNA_A, seq)
8
julia> findnext(DNA_A, seq, 2)
3
julia> findprev(DNA_A, seq, 7)
3
julia> findall(DNA_A, seq)
3-element Vector{Int64}:
1
3
8
```
## Exact search
```@docs
ExactSearchQuery
```
## Allowing mismatches
```@docs
ApproximateSearchQuery
```
## Searching according to a pattern
### Regular expression search
Query patterns can be described in regular expressions. The syntax supports
a subset of Perl and PROSITE's notation.
Biological regexes can be constructed using the `BioRegex` constructor, for
example by doing `BioRegex{AminoAcid}("MV+")`. For bioregex literals, it is
instead recommended using the `@biore_str` macro:
The Perl-like syntax starts with `biore` (BIOlogical REgular expression)
and ends with a symbol option: "dna", "rna" or "aa". For example, `biore"A+"dna`
is a regular expression for DNA sequences and `biore"A+"aa` is for amino acid
sequences. The symbol options can be abbreviated to its first character: "d",
"r" or "a", respectively.
Here are examples of using the regular expression for `BioSequence`s:
```jldoctest
julia> match(biore"A+C*"dna, dna"AAAACC")
RegexMatch("AAAACC")
julia> match(biore"A+C*"d, dna"AAAACC")
RegexMatch("AAAACC")
julia> occursin(biore"A+C*"dna, dna"AAC")
true
julia> occursin(biore"A+C*"dna, dna"C")
false
```
`match` will return a `RegexMatch` if a match is found, otherwise it will return `nothing` if no match is found.
The table below summarizes available syntax elements.
| Syntax | Description | Example |
|:------:|:------------|:--------|
| `\|` | alternation | `"A\|T"` matches `"A"` and `"T"` |
| `*` | zero or more times repeat | `"TA*"` matches `"T"`, `"TA"` and `"TAA"` |
| `+` | one or more times repeat | `"TA+"` matches `"TA"` and `"TAA"` |
| `?` | zero or one time | `"TA?"` matches `"T"` and `"TA"` |
| `{n,}` | `n` or more times repeat | `"A{3,}"` matches `"AAA"` and `"AAAA"` |
| `{n,m}` | `n`-`m` times repeat | `"A{3,5}"` matches `"AAA"`, `"AAAA"` and `"AAAAA"`|
| `^` | the start of the sequence | `"^TAN*"` matches `"TATGT"` |
| `$` | the end of the sequence | `"N*TA$"` matches `"GCTA"` |
| `(...)` | pattern grouping | `"(TA)+"` matches `"TA"` and `"TATA"` |
| `[...]` | one of symbols | `"[ACG]+"` matches `"AGGC"` |
`eachmatch` and `findfirst` are also defined, just like usual regex and strings
found in `Base`.
```jldoctest
julia> collect(matched(x) for x in eachmatch(biore"TATA*?"d, dna"TATTATAATTA")) # overlap
4-element Vector{LongSequence{DNAAlphabet{4}}}:
TAT
TAT
TATA
TATAA
julia> collect(matched(x) for x in eachmatch(biore"TATA*"d, dna"TATTATAATTA", false)) # no overlap
2-element Vector{LongSequence{DNAAlphabet{4}}}:
TAT
TATAA
julia> findfirst(biore"TATA*"d, dna"TATTATAATTA")
1:3
julia> findfirst(biore"TATA*"d, dna"TATTATAATTA", 2)
4:8
```
Noteworthy differences from strings are:
* Ambiguous characters match any compatible characters (e.g. `biore"N"d` is equivalent to `biore"[ACGT]"d`).
* Whitespaces are ignored (e.g. `biore"A C G"d` is equivalent to `biore"ACG"d`).
The PROSITE notation is described in [ScanProsite - user
manual](https://prosite.expasy.org/scanprosite/scanprosite_doc.html). The syntax
supports almost all notations including the extended syntax. The PROSITE
notation starts with `prosite` prefix and no symbol option is needed because it
always describes patterns of amino acid sequences:
```jldoctest
julia> match(prosite"[AC]-x-V-x(4)-{ED}", aa"CPVPQARG")
RegexMatch("CPVPQARG")
julia> match(prosite"[AC]xVx(4){ED}", aa"CPVPQARG")
RegexMatch("CPVPQARG")
```
### Position weight matrix search
A motif can be specified using [position weight
matrix](https://en.wikipedia.org/wiki/Position_weight_matrix) (PWM) in a
probabilistic way.
This method searches for the first position in the sequence where a score
calculated using a PWM is greater than or equal to a threshold.
More formally, denoting the sequence as ``S`` and the PWM value of symbol ``s``
at position ``j`` as ``M_{s,j}``, the score starting from a position ``p`` is
defined as
```math
\operatorname{score}(S, p) = \sum_{i=1}^L M_{S[p+i-1],i}
```
and the search returns the smallest ``p`` that satisfies
``\operatorname{score}(S, p) \ge t``.
There are two kinds of matrices in this package: `PFM` and `PWM`. The `PFM` type
is a position frequency matrix and stores symbol frequencies for each position.
The `PWM` is a position weight matrix and stores symbol scores for each
position. You can create a `PFM` from a set of sequences with the same length
and then create a `PWM` from the `PFM` object.
```jldoctest
julia> motifs = [dna"TTA", dna"CTA", dna"ACA", dna"TCA", dna"GTA"]
5-element Vector{LongSequence{DNAAlphabet{4}}}:
TTA
CTA
ACA
TCA
GTA
julia> pfm = PFM(motifs) # sequence set => PFM
4×3 PFM{DNA, Int64}:
A 1 0 5
C 1 2 0
G 1 0 0
T 2 3 0
julia> pwm = PWM(pfm) # PFM => PWM
4×3 PWM{DNA, Float64}:
A -0.321928 -Inf 2.0
C -0.321928 0.678072 -Inf
G -0.321928 -Inf -Inf
T 0.678072 1.26303 -Inf
julia> pwm = PWM(pfm .+ 0.01) # add pseudo counts to avoid infinite values
4×3 PWM{DNA, Float64}:
A -0.319068 -6.97728 1.99139
C -0.319068 0.673772 -6.97728
G -0.319068 -6.97728 -6.97728
T 0.673772 1.25634 -6.97728
julia> pwm = PWM(pfm .+ 0.01, prior=[0.2, 0.3, 0.3, 0.2]) # GC-rich prior
4×3 PWM{DNA, Float64}:
A 0.00285965 -6.65535 2.31331
C -0.582103 0.410737 -7.24031
G -0.582103 -7.24031 -7.24031
T 0.9957 1.57827 -6.65535
```
The ``PWM_{s,j}`` matrix is computed from ``PFM_{s,j}`` and the prior
probability ``p(s)`` as follows ([Wasserman2004]):
```math
\begin{align}
PWM_{s,j} &= \log_2 \frac{p(s,j)}{p(s)} \\
p(s,j) &= \frac{PFM_{s,j}}{\sum_{s'} PFM_{s',j}}.
\end{align}
```
However, if you just want to quickly conduct a search, constructing the PFM and
PWM is done for you as a convenience if you build a `PWMSearchQuery`, using a
collection of sequences:
```jldoctest
julia> motifs = [dna"TTA", dna"CTA", dna"ACA", dna"TCA", dna"GTA"]
5-element Vector{LongSequence{DNAAlphabet{4}}}:
TTA
CTA
ACA
TCA
GTA
julia> subject = dna"TATTATAATTA";
julia> qa = PWMSearchQuery(motifs, 1.0);
julia> findfirst(qa, subject)
3
julia> findall(qa, subject)
3-element Vector{Int64}:
3
5
9
```
[Wasserman2004]: https://doi.org/10.1038/nrg1315
| BioSequences | https://github.com/BioJulia/BioSequences.jl.git |
|
[
"MIT"
] | 3.1.6 | 6fdba8b4279460fef5674e9aa2dac7ef5be361d5 | docs | 4808 | ```@meta
CurrentModule = BioSequences
DocTestSetup = quote
using BioSequences
end
```
# Biological symbols
The `BioSequences` module reexports the biological symbol (character) types
that are provided by [BioSymbols.jl:](https://biojulia.dev/BioSymbols.jl/)
| Type | Meaning |
| :-------------- | :------------- |
| `DNA` | DNA nucleotide |
| `RNA` | RNA nucleotide |
| `AminoAcid` | Amino acid |
These symbols are elements of biological sequence types, just as characters are
elements of strings.
## DNA and RNA nucleotides
Set of nucleotide symbols in BioSequences covers IUPAC nucleotide base plus
a gap symbol:
| Symbol | Constant | Meaning |
| :----- | :-------------------- | :------------------------- |
| 'A' | `DNA_A` / `RNA_A` | A; Adenine |
| 'C' | `DNA_C` / `RNA_C` | C; Cytosine |
| 'G' | `DNA_G` / `RNA_G` | G; Guanine |
| 'T' | `DNA_T` | T; Thymine (DNA only) |
| 'U' | `RNA_U` | U; Uracil (RNA only) |
| 'M' | `DNA_M` / `RNA_M` | A or C |
| 'R' | `DNA_R` / `RNA_R` | A or G |
| 'W' | `DNA_W` / `RNA_W` | A or T/U |
| 'S' | `DNA_S` / `RNA_S` | C or G |
| 'Y' | `DNA_Y` / `RNA_Y` | C or T/U |
| 'K' | `DNA_K` / `RNA_K` | G or T/U |
| 'V' | `DNA_V` / `RNA_V` | A or C or G; not T/U |
| 'H' | `DNA_H` / `RNA_H` | A or C or T; not G |
| 'D' | `DNA_D` / `RNA_D` | A or G or T/U; not C |
| 'B' | `DNA_B` / `RNA_B` | C or G or T/U; not A |
| 'N' | `DNA_N` / `RNA_N` | A or C or G or T/U |
| '-' | `DNA_Gap` / `RNA_Gap` | Gap (none of the above) |
<https://www.bioinformatics.org/sms/iupac.html>
Symbols are accessible as constants with `DNA_` or `RNA_` prefix:
```jldoctest
julia> DNA_A
DNA_A
julia> DNA_T
DNA_T
julia> RNA_U
RNA_U
julia> DNA_Gap
DNA_Gap
julia> typeof(DNA_A)
DNA
julia> typeof(RNA_A)
RNA
```
Symbols can be constructed by converting regular characters:
```jldoctest
julia> convert(DNA, 'C')
DNA_C
julia> convert(DNA, 'C') === DNA_C
true
```
## Amino acids
Set of amino acid symbols also covers IUPAC amino acid symbols plus a gap symbol:
| Symbol | Constant | Meaning |
| :----------- | :-------------- | :-------------------------- |
| 'A' | `AA_A` | Alanine |
| 'R' | `AA_R` | Arginine |
| 'N' | `AA_N` | Asparagine |
| 'D' | `AA_D` | Aspartic acid (Aspartate) |
| 'C' | `AA_C` | Cysteine |
| 'Q' | `AA_Q` | Glutamine |
| 'E' | `AA_E` | Glutamic acid (Glutamate) |
| 'G' | `AA_G` | Glycine |
| 'H' | `AA_H` | Histidine |
| 'I' | `AA_I` | Isoleucine |
| 'L' | `AA_L` | Leucine |
| 'K' | `AA_K` | Lysine |
| 'M' | `AA_M` | Methionine |
| 'F' | `AA_F` | Phenylalanine |
| 'P' | `AA_P` | Proline |
| 'S' | `AA_S` | Serine |
| 'T' | `AA_T` | Threonine |
| 'W' | `AA_W` | Tryptophan |
| 'Y' | `AA_Y` | Tyrosine |
| 'V' | `AA_V` | Valine |
| 'O' | `AA_O` | Pyrrolysine |
| 'U' | `AA_U` | Selenocysteine |
| 'B' | `AA_B` | Aspartic acid or Asparagine |
| 'J' | `AA_J` | Leucine or Isoleucine |
| 'Z' | `AA_Z` | Glutamine or Glutamic acid |
| 'X' | `AA_X` | Any amino acid |
| '*' | `AA_Term` | Termination codon |
| '-' | `AA_Gap` | Gap (none of the above) |
<https://www.bioinformatics.org/sms/iupac.html>
Symbols are accessible as constants with `AA_` prefix:
```jldoctest
julia> AA_A
AA_A
julia> AA_Q
AA_Q
julia> AA_Term
AA_Term
julia> typeof(AA_A)
AminoAcid
```
Symbols can be constructed by converting regular characters:
```jldoctest
julia> convert(AminoAcid, 'A')
AA_A
julia> convert(AminoAcid, 'P') === AA_P
true
```
## Other functions
```@docs
alphabet
gap
iscompatible
isambiguous
```
| BioSequences | https://github.com/BioJulia/BioSequences.jl.git |
|
[
"MIT"
] | 3.1.6 | 6fdba8b4279460fef5674e9aa2dac7ef5be361d5 | docs | 4970 | ```@meta
CurrentModule = BioSequences
DocTestSetup = quote
using BioSequences
end
```
# Indexing & modifying sequences
## Indexing
Most `BioSequence` concrete subtypes for the most part behave like other vector
or string types. They can be indexed using integers or ranges:
For example, with `LongSequence`s:
```jldoctest
julia> seq = dna"ACGTTTANAGTNNAGTACC"
19nt DNA Sequence:
ACGTTTANAGTNNAGTACC
julia> seq[5]
DNA_T
julia> seq[6:end]
14nt DNA Sequence:
TANAGTNNAGTACC
```
The biological symbol at a given locus in a biological sequence can be set using
setindex:
```jldoctest
julia> seq = dna"ACGTTTANAGTNNAGTACC"
19nt DNA Sequence:
ACGTTTANAGTNNAGTACC
julia> seq[5] = DNA_A
DNA_A
```
!!! note
    Some types can be indexed using integers but not using ranges.
For `LongSequence` types, indexing a sequence by range creates a copy of the
original sequence, similar to `Array` in Julia's `Base` library. If you find yourself
slowed down by the allocation of these subsequences, consider using a sequence view
instead.
## Modifying sequences
In addition to `setindex`, many other modifying operations are possible for
biological sequences such as `push!`, `pop!`, and `insert!`, which should be
familiar to anyone used to editing arrays.
```@docs
push!(::BioSequences.BioSequence, ::Any)
pop!(::BioSequences.BioSequence)
pushfirst!(::BioSequences.BioSequence, ::Any)
popfirst!(::BioSequences.BioSequence)
insert!(::BioSequences.BioSequence, ::Integer, ::Any)
deleteat!(::BioSequences.BioSequence, ::Integer)
append!(::BioSequences.BioSequence, ::BioSequences.BioSequence)
resize!(::BioSequences.LongSequence, ::Integer)
empty!(::BioSequences.BioSequence)
```
Here are some examples:
```jldoctest
julia> seq = dna"ACG"
3nt DNA Sequence:
ACG
julia> push!(seq, DNA_T)
4nt DNA Sequence:
ACGT
julia> append!(seq, dna"AT")
6nt DNA Sequence:
ACGTAT
julia> deleteat!(seq, 2)
5nt DNA Sequence:
AGTAT
julia> deleteat!(seq, 2:3)
3nt DNA Sequence:
AAT
```
### Additional transformations
In addition to these basic modifying functions, other sequence transformations
that are common in bioinformatics are also provided.
```@docs
reverse!(::BioSequences.LongSequence)
reverse(::BioSequences.LongSequence{<:NucleicAcidAlphabet})
complement!
complement
reverse_complement!
reverse_complement
ungap!
ungap
canonical!
canonical
```
Some examples:
```jldoctest
julia> seq = dna"ACGTAT"
6nt DNA Sequence:
ACGTAT
julia> reverse!(seq)
6nt DNA Sequence:
TATGCA
julia> complement!(seq)
6nt DNA Sequence:
ATACGT
julia> reverse_complement!(seq)
6nt DNA Sequence:
ACGTAT
```
Many of these methods also have a version which makes a copy of the input
sequence, so you get a modified copy, and don't alter the original sequence.
Such methods are named the same, but without the exclamation mark.
E.g. `reverse` instead of `reverse!`, and `ungap` instead of `ungap!`.
#### Translation
Translation is a slightly more complex transformation for RNA Sequences and so
we describe it here in more detail.
The [`translate`](@ref) function translates a sequence of codons in a RNA sequence
to an amino acid sequence based on a genetic code. The `BioSequences` package
provides all NCBI defined genetic codes and they are registered in
[`ncbi_trans_table`](@ref).
```@docs
translate
ncbi_trans_table
```
```jldoctest
julia> ncbi_trans_table
Translation Tables:
1. The Standard Code (standard_genetic_code)
2. The Vertebrate Mitochondrial Code (vertebrate_mitochondrial_genetic_code)
3. The Yeast Mitochondrial Code (yeast_mitochondrial_genetic_code)
4. The Mold, Protozoan, and Coelenterate Mitochondrial Code and the Mycoplasma/Spiroplasma Code (mold_mitochondrial_genetic_code)
5. The Invertebrate Mitochondrial Code (invertebrate_mitochondrial_genetic_code)
6. The Ciliate, Dasycladacean and Hexamita Nuclear Code (ciliate_nuclear_genetic_code)
9. The Echinoderm and Flatworm Mitochondrial Code (echinoderm_mitochondrial_genetic_code)
10. The Euplotid Nuclear Code (euplotid_nuclear_genetic_code)
11. The Bacterial, Archaeal and Plant Plastid Code (bacterial_plastid_genetic_code)
12. The Alternative Yeast Nuclear Code (alternative_yeast_nuclear_genetic_code)
13. The Ascidian Mitochondrial Code (ascidian_mitochondrial_genetic_code)
14. The Alternative Flatworm Mitochondrial Code (alternative_flatworm_mitochondrial_genetic_code)
16. Chlorophycean Mitochondrial Code (chlorophycean_mitochondrial_genetic_code)
21. Trematode Mitochondrial Code (trematode_mitochondrial_genetic_code)
22. Scenedesmus obliquus Mitochondrial Code (scenedesmus_obliquus_mitochondrial_genetic_code)
23. Thraustochytrium Mitochondrial Code (thraustochytrium_mitochondrial_genetic_code)
24. Pterobranchia Mitochondrial Code (pterobrachia_mitochondrial_genetic_code)
25. Candidate Division SR1 and Gracilibacteria Code (candidate_division_sr1_genetic_code)
```
<https://www.ncbi.nlm.nih.gov/Taxonomy/taxonomyhome.html/index.cgi?chapter=cgencodes>
| BioSequences | https://github.com/BioJulia/BioSequences.jl.git |
|
[
"MIT"
] | 3.1.6 | 6fdba8b4279460fef5674e9aa2dac7ef5be361d5 | docs | 2952 | ```@meta
CurrentModule = BioSequences
DocTestSetup = quote
using BioSequences
end
```
# Abstract Types
BioSequences exports an abstract `BioSequence` type, and several concrete sequence
types which inherit from it.
## The abstract BioSequence
BioSequences provides an abstract type called a `BioSequence{A<:Alphabet}`.
This abstract type, and the methods and traits it supports, allows for
many algorithms in BioSequences to be written as generically as possible,
thus reducing the amount of code to read and understand, whilst maintaining high
performance when such code is compiled for a concrete BioSequence subtype.
Additionally, it allows new types to be implemented that are fully compatible
with the rest of BioSequences, provided that key methods or traits are defined.
```@docs
BioSequence
```
Some aliases for `BioSequence` are also provided for your convenience:
```@docs
NucSeq
AASeq
```
Let's have a closer look at some of those methods that a subtype of `BioSequence`
must implement. Check out julia base library docs for `length`, `copy` and `resize!`.
```@docs
encoded_data_eltype
extract_encoded_element
encoded_setindex!
```
A correctly defined subtype of `BioSequence` that satisfies the interface, will
find the vast majority of methods described in the rest of this manual
should work out of the box for that type. But they can always be overloaded if
needed. Indeed, some of the generic `BioSequence` methods
are overloaded for `LongSequence`, for example
for transformation and counting operations where efficiency gains can be made
due to the specific internal representation of a specific type.
## The abstract Alphabet
Alphabets control how biological symbols are encoded and decoded.
They also confer many of the automatic traits and methods that any subtype
of `T<:BioSequence{A<:Alphabet}` will get.
```@docs
BioSequences.Alphabet
BioSequences.AsciiAlphabet
```
# Concrete types
## Implemented alphabets
```@docs
DNAAlphabet
RNAAlphabet
AminoAcidAlphabet
```
## Long Sequences
```@docs
LongSequence
```
## Sequence views
Similar to how Base Julia offers views of array objects, BioSequences offers view of
`LongSequence`s - the `LongSubSeq{A<:Alphabet}`.
Conceptually, a `LongSubSeq{A}` is similar to a `LongSequence{A}`, but instead of storing
their own data, they refer to the data of a `LongSequence`. Modifying the `LongSequence`
will be reflected in the view, and vice versa. If the underlying `LongSequence`
is truncated, the behaviour of a view is undefined. For the same reason,
some operations are not supported for views, such as resizing.
The purpose of `LongSubSeq` is that, since they only contain a pointer to the
underlying array, an offset and a length, they are much lighter than `LongSequences`,
and will be stack allocated on Julia 1.5 and newer. Thus, the user may construct
millions of views without major performance implications.
| BioSequences | https://github.com/BioJulia/BioSequences.jl.git |
|
[
"MIT"
] | 0.2.0 | 49b02f1e6a54e4397720f84e95386e7eabb4ca71 | code | 495 | push!(LOAD_PATH, "../src/")
using Documenter, CJKFrequencies
# Build the HTML documentation site for CJKFrequencies.jl.
makedocs(
    sitename="CJKFrequencies.jl Documentation",
    format=Documenter.HTML(
        # Pretty (directory-style) URLs only on CI; local builds stay browsable from disk.
        prettyurls=get(ENV, "CI", nothing) == "true"
    ),
    modules=[CJKFrequencies],
    pages=[
        "Home" => "index.md",
        "API Reference" => "api_reference.md",
        "Developer Docs" => "devdocs.md"
    ]
)

# Publish the built docs to GitHub Pages; development builds come from the
# `main` branch and are served under the "latest" URL.
deploydocs(
    repo = "github.com/tmthyln/CJKFrequencies.jl.git",
    devbranch = "main",
    devurl="latest"
)
| CJKFrequencies | https://github.com/JuliaCJK/CJKFrequencies.jl.git |
|
[
"MIT"
] | 0.2.0 | 49b02f1e6a54e4397720f84e95386e7eabb4ca71 | code | 253 | module CJKFrequencies
using LightXML
using DataStructures
using LazyArtifacts
export CJKFrequency, charfreq,
SimplifiedLCMC, SimplifiedJunDa,
Lexicon, tagged_with
include("charfreq.jl")
include("frequency_datasets.jl")
include("lexicon.jl")
end
| CJKFrequencies | https://github.com/JuliaCJK/CJKFrequencies.jl.git |
|
[
"MIT"
] | 0.2.0 | 49b02f1e6a54e4397720f84e95386e7eabb4ca71 | code | 3191 |
"""
Accumulator-like data structure for storing frequencies of CJK words (although other tokens
can be stored as well).
You generally don't need to explicitly call this struct's constructor yourself; rather, use
the `charfreq` function.
"""
struct CJKFrequency{S <: AbstractString, C <: Number}
freq::Accumulator{S, C}
size::Ref{C}
function CJKFrequency(frequencies=Dict{String, Int}())
key_type = eltype(keys(frequencies))
val_type = eltype(values(frequencies))
new{key_type, val_type}(counter(frequencies), Ref(sum(values(frequencies))))
end
end
# mutation

# Increment the count of `key` by `count`, keeping the cached total in sync.
function DataStructures.inc!(cf::CJKFrequency, key, count=1)
    inc!(cf.freq, key, count)
    cf.size[] += count
end

# Decrement the count of `key` by at most `count`, clamping the stored count
# at zero; unknown keys are ignored. The cached total is reduced only by the
# amount actually removed.
function DataStructures.dec!(cf::CJKFrequency, key, count=1)
    if key in keys(cf.freq)
        cf.size[] -= cf.freq[key] - max(0, cf.freq[key] - count)
        dec!(cf.freq, key, min(count, cf.freq[key]))
    end
end

# Remove `key`'s count entirely, keeping the cached total in sync.
function DataStructures.reset!(cf::CJKFrequency, key)
    # Bug fix: this previously read `size[]` — indexing the `Base.size`
    # function rather than the `cf.size` Ref — which raised an error whenever
    # `key` was present.
    key in keys(cf.freq) && (cf.size[] -= cf.freq[key])
    reset!(cf.freq, key)
end
# collections operations
Base.isempty(cf::CJKFrequency) = isempty(cf.freq)
function Base.empty!(cf::CJKFrequency)
    empty!(cf.freq)
    # Bug fix: this previously assigned to a *local* variable (`size = 0`),
    # leaving the cached total stale; the Ref itself must be reset.
    cf.size[] = 0
    return cf
end
Base.in(item, cf::CJKFrequency) = in(item, cf.freq)

# dictionaries
Base.haskey(cf::CJKFrequency, key) = haskey(cf.freq, key)
Base.keys(cf::CJKFrequency) = keys(cf.freq)
Base.values(cf::CJKFrequency) = values(cf.freq)

# indexing
Base.getindex(cf::CJKFrequency, i) = getindex(cf.freq, i)
Base.firstindex(cf::CJKFrequency) = firstindex(cf.freq)
# NOTE(review): first/lastindex simply forward to the wrapped Accumulator;
# whether the Accumulator supports them is up to DataStructures — confirm
# before relying on `cf[end]`.
Base.lastindex(cf::CJKFrequency) = lastindex(cf.freq)

# iteration (yields `token => count` pairs, like a Dict)
Base.iterate(cf::CJKFrequency) = iterate(cf.freq)
Base.iterate(cf::CJKFrequency, state) = iterate(cf.freq, state)
Base.length(cf::CJKFrequency) = length(cf.freq)
# Total number of tokens counted (sum of all frequencies), read from the cache.
Base.size(cf::CJKFrequency) = cf.size[]
"""
charfreq(text)
charfreq(charfreq_type)
Create a character frequency mapping from either text or load it from a default location for
pre-specified character frequency datasets (e.g. `SimplifiedLCMC`, `SimplifiedJunDa`, etc.).
## Examples
When creating a character frequency from text, this method behaves almost exactly like
`DataStructures.counter` except that the return value always has type `CharacterFrequency`
(`Accumulator{String, Int}`).
```julia-repl
julia> text = split("王老师性格内向,沉默寡言,我除在课外活动小组“文学研究会”听过他一次报告,并听-邓知识渊博,是“老师的老师”外,对他一无所知。所以,研读他的作", "");
julia> charfreq(text)
CJKFrequency{SubString{String}, Int64}(Accumulator(除 => 1, 报 => 1, 是 => 1, 知 => 2, 并 => 1, 性 => 1, , => 6, 言 => 1, 邓 => 1, 外 => 2, 所 => 2, 对 => 1, 动 => 1, 寡 => 1, 。 => 1, 渊 => 1, 学 => 1, - => 1, 听 => 2, 我 => 1, 次 => 1, 一 => 2, 读 => 1, 作 => 1, 格 => 1, “ => 2, 博 => 1, 课 => 1, 老 => 3, 会 => 1, 告 => 1, 无 => 1, 活 => 1, 组 => 1, 内 => 1, 师 => 3, 的 => 2, 小 => 1, 文 => 1, 默 => 1, 究 => 1, 过 => 1, 在 => 1, 以 => 1, ” => 2, 研 => 2, 他 => 3, 向 => 1, 沉 => 1, 王 => 1), Base.RefValue{Int64}(71))
```
See the documentation for individual character frequency dataset structs for examples of the
second case.
"""
function charfreq end

# Generic iterables of pre-tokenized input: count each token as-is.
function charfreq(tokens)
    return CJKFrequency(counter(tokens))
end

# Strings: split into single-character substrings first, so counting is
# per-character rather than per-word.
function charfreq(text::AbstractString)
    characters = split(text, "")
    return CJKFrequency(counter(characters))
end
| CJKFrequencies | https://github.com/JuliaCJK/CJKFrequencies.jl.git |
|
[
"MIT"
] | 0.2.0 | 49b02f1e6a54e4397720f84e95386e7eabb4ca71 | code | 4679 |
#==============================================================================#
#= Simplified LCMC =#
#==============================================================================#
# Mapping from LCMC single-character category codes to human-readable names.
const LCMC_CATEGORIES = Dict(
    'A' => "Press: reportage",
    'B' => "Press: editorials",
    'C' => "Press: reviews",
    'D' => "Religion",
    'E' => "Skills, trades and hobbies",
    'F' => "Popular lore",
    'G' => "Biographies and essays",
    'H' => "Miscellaneous: reports and official documents",
    'J' => "Science: academic prose",
    'K' => "General fiction",
    'L' => "Mystery and detective fiction",
    'M' => "Science fiction",
    'N' => "Adventure and martial arts fiction",
    'P' => "Romantic fiction",
    'R' => "Humour")

"""
    SimplifiedLCMC([categories])

A character frequency dataset: Lancaster Corpus for Mandarin Chinese, simplified terms only,
based on simplified text corpus. See their
[website](https://www.lancaster.ac.uk/fass/projects/corpus/LCMC/default.htm) for more details about the corpus.

The character frequency can be based only on selected categories (see `CJKFrequencies.LCMC_CATEGORIES` for valid
category keys and corresponding category names). Any invalid categories will be ignored.

## Examples
Loading all the categories:
```julia-repl
julia> charfreq(SimplifiedLCMC())
DataStructures.Accumulator{String,Int64} with 45411 entries:
  "一路… => 1
  "舍得" => 9
  "58" => 1
  "神农… => 1
  "十点" => 8
  "随从" => 9
  "荡心… => 1
  "尺码" => 1
  ⋮ => ⋮
```

Or loading just a subset (argument can be any iterable):
```julia-repl
julia> charfreq(SimplifiedLCMC("ABEGKLMNR"))
DataStructures.Accumulator{String,Int64} with 35488 entries:
  "废… => 1
  "蜷" => 1
  "哇" => 13
  "丰… => 1
  "弊… => 3
  "议… => 10
  "滴" => 28
  "美… => 1
  ⋮ => ⋮
```

## Licensing/Copyright
Note: This corpus has some conflicting licensing information, depending on who is supplying the
data.

The original corpus is provided primarily for non-profit-making research. Be sure to see the full
[end user license agreement](https://www.lancaster.ac.uk/fass/projects/corpus/LCMC/lcmc/lcmc_license.htm).

Via the
[Oxford Text Archive](https://ota.bodleian.ox.ac.uk/repository/xmlui/handle/20.500.12024/2474),
this corpus is distributed under the
[CC BY-NC-SA 3.0](http://creativecommons.org/licenses/by-nc-sa/3.0/) license.
"""
struct SimplifiedLCMC
    categories::Set{Char}

    function SimplifiedLCMC(cats)
        # Bug fix: this previously started from `Set{String}()`, which only
        # worked because empty sets convert; the field holds Char codes.
        lcmc = new(Set{Char}())
        for cat in cats
            # Invalid category codes are silently skipped, as documented.
            haskey(LCMC_CATEGORIES, cat) && push!(lcmc.categories, cat)
        end
        lcmc
    end
    # No-argument form selects every category.
    SimplifiedLCMC() = new(Set{Char}(keys(LCMC_CATEGORIES)))
end
# Build a `CJKFrequency` by accumulating word counts from each selected
# category's XML corpus file, bundled as the "lcmc" artifact.
function charfreq(lcmc::SimplifiedLCMC)
    frequencies = CJKFrequency()
    for category in lcmc.categories
        path = joinpath(artifact"lcmc", "LCMC_$(category).XML")
        document = parse_file(path)
        _words_from_xml(root(document), frequencies)
    end
    return frequencies
end
# Recursively walk the XML tree below `xml_elem`, incrementing `accum` once
# for the text content of every `<w>` (word) element encountered.
function _words_from_xml(xml_elem, accum)
    for child in child_nodes(xml_elem)
        name(child) == "w" ? inc!(accum, content(child)) : _words_from_xml(child, accum)
    end
end
#==============================================================================#
#= Simplified Jun Da =#
#==============================================================================#
"""
SimplifiedJunDa()
A character frequency
[dataset](https://lingua.mtsu.edu/chinese-computing/)
of modern Chinese compiled by Jun Da, simplified single-character
words only.
Currently, only the modern Chinese dataset is fetched; however, in the future, the other lists may
also be provided as an option.
## Examples
```julia-repl
julia> charfreq(SimplifiedJunDa())
DataStructures.Accumulator{String,Int64} with 9932 entries:
"蜷… => 837
"哇… => 4055
"湓… => 62
"滴… => 8104
"堞… => 74
"狭… => 6901
"尚… => 38376
"懈… => 2893
⋮ => ⋮
```
## Licensing/Copyright
The original author maintains full copyright to the character frequency lists, but provides the
lists for research and teaching/learning purposes only, no commercial use without permission from
the author. See their full disclaimer and copyright notice [here](https://lingua.mtsu.edu/chinese-computing/copyright.html).
"""
# Marker type selecting Jun Da's modern-Chinese frequency list in `charfreq`.
struct SimplifiedJunDa end

# Load the bundled Jun Da frequency list (the "junda" artifact) into a CJKFrequency.
function charfreq(::SimplifiedJunDa)
    cf = CJKFrequency()
    # Expected line shape: rank, single character, raw count, cumulative
    # percentage, trailing text (presumably pinyin/definition — TODO confirm
    # against the artifact's freq.txt).
    # NOTE(review): the `(?:\.\d+)` group is NOT optional, so any line whose
    # percentage column lacks a decimal point would be silently skipped —
    # confirm this never occurs in the data.
    pattern = r"^\d+\s+(\w)\s+(\d+)\s+\d+(?:\.\d+)\s+.+$"
    for line in eachline(joinpath(artifact"junda", "freq.txt"))
        m = match(pattern, line)
        # Non-matching lines (headers, blanks) are ignored.
        m !== nothing && inc!(cf, m.captures[1], Base.parse(Int, m.captures[2]))
    end
    cf
end
| CJKFrequencies | https://github.com/JuliaCJK/CJKFrequencies.jl.git |
|
[
"MIT"
] | 0.2.0 | 49b02f1e6a54e4397720f84e95386e7eabb4ca71 | code | 2183 |
struct Lexicon
    # word => set of tag strings attached to that word
    words::Dict{String, Set{String}}
    # every tag that has ever been attached to any word in this lexicon
    tags::Set{String}
end

"""
    Lexicon()
    Lexicon(io_or_filename)
    Lexicon(words)

Construct a lexicon. It can be empty (no parameters) or created from some IO-like object or a
sequence/iterable of words.

A lexicon is a list of (known) words, each of which can be tagged with various tags (e.g.
indicating how it is known, etc.).
"""
function Lexicon end

Lexicon(words) = Lexicon(Dict(word => Set{String}() for word in words), Set{String}())
Lexicon() = Lexicon(Dict{String, Set{String}}(), Set{String}())

function Lexicon(io::Union{IO, AbstractString})
    lex = Lexicon()
    for line in eachline(io)
        # Robustness: skip blank lines (e.g. a trailing newline at EOF), which
        # previously raised an error during destructuring.
        isempty(line) && continue
        # `limit=2` keeps everything after the first ':' together; the old
        # 2-way destructuring silently discarded anything past a second ':'.
        word, tag_string = split(line, ":"; limit=2)
        # Bug fix: a tagless line ends in ':', and splitting "" on "," yields
        # [""], which used to register a bogus empty tag. Filter empties out.
        tags = filter(!isempty, split(tag_string, ","))
        push!(lex, word; tags=tags)
    end
    lex
end

Base.length(lex::Lexicon) = length(lex.words)
Base.in(item, lex::Lexicon) = haskey(lex.words, item)
Base.getindex(lex::Lexicon, index) = getindex(lex.words, index)

# Serialize one `word:tag1,tag2,...` line per word; `Lexicon(io)` parses this
# same format back.
Base.print(io::IO, lex::Lexicon) =
    for (word, tags) in lex.words
        println(io, "$word:$(join(tags, ","))")
    end

"""
    push!(lex::Lexicon, words...; tags=())

Add each of `words` to the lexicon, attaching every tag in `tags` to it.
Existing words simply gain the new tags. Returns `lex`, following the
`Base.push!` convention of returning the collection.
"""
function Base.push!(lex::Lexicon, words...; tags=())
    for word in words
        tagset = get!(() -> Set{String}(), lex.words, word)
        union!(tagset, tags)
    end
    union!(lex.tags, tags)
    return lex
end
"""
    tagged_with(lexicon, tag)

Return a vector of all words in `lexicon` that are tagged with `tag`.
"""
tagged_with(lex::Lexicon, tag) =
    [word for (word, tags) in lex.words if tag in tags]

"""
    coverage(lexicon, charfreq)
    coverage(lexicon, text)

Compute a lexicon's coverage of a text (possibly via a precomputed character
frequency dictionary). Both token and type coverage are provided, as a named
tuple `(char_coverage=..., type_coverage=...)`.
"""
function coverage(lex::Lexicon, cf::Union{CJKFrequency, Accumulator})
    known_tokens, total_tokens = 0, 0
    # Bug fix: `total_types` used to start at `length(lex)`, inflating the
    # denominator; type coverage is over the distinct tokens of the *text*.
    known_types, total_types = 0, 0
    for (char, freq) in cf
        if char in lex
            known_tokens += freq
            known_types += 1
        end
        total_tokens += freq
        total_types += 1
    end
    # Bug fix: the original called the undefined `Float(...)`; Int/Int
    # division already yields Float64.
    (char_coverage=known_tokens/total_tokens, type_coverage=known_types/total_types)
end

# Bug fix: the arguments were previously swapped (`coverage(charfreq(text), lex)`),
# and `charfreq` returns a `CJKFrequency`, which the old `::Accumulator`-only
# method did not accept — so this fallback always raised a MethodError.
coverage(lex::Lexicon, text) = coverage(lex, charfreq(text))
| CJKFrequencies | https://github.com/JuliaCJK/CJKFrequencies.jl.git |
|
[
"MIT"
] | 0.2.0 | 49b02f1e6a54e4397720f84e95386e7eabb4ca71 | code | 580 |
@testset "char freq from text" begin
test_text = "十七道身影,十七道白色的身影,宛如星丸跳跃一般从山腰处朝山顶方向而来,这十七道身影的主人,年纪最小的也超过了五旬,一个个神色凝重,他们身穿的白袍代表的是内门,而胸前那金色的唐字则是唐门长老的象征。"
cf = charfreq(test_text)
@test "十" in keys(cf) && cf["十"] == 3
@test length(cf) == 65
end
@testset "char freq from iterable" begin
tokens = ["a", "b", "a", "c", "b", "a", "c", "b", "a"]
cf = charfreq(tokens)
@test "a" in keys(cf) && cf["a"] == 4
@test "b" in keys(cf) && cf["b"] == 3
@test "c" in keys(cf) && cf["c"] == 2
@test length(cf) == 3
@test size(cf) == length(tokens)
end
| CJKFrequencies | https://github.com/JuliaCJK/CJKFrequencies.jl.git |
|
[
"MIT"
] | 0.2.0 | 49b02f1e6a54e4397720f84e95386e7eabb4ca71 | code | 238 |
@testset "load lcmc w/ no errors" begin
lcmc_cf = charfreq(SimplifiedLCMC())
@test length(lcmc_cf) != 0
end
@testset "load junda w/ no errors" begin
junda_cf = charfreq(SimplifiedJunDa())
@test length(junda_cf) != 0
end
| CJKFrequencies | https://github.com/JuliaCJK/CJKFrequencies.jl.git |
|
[
"MIT"
] | 0.2.0 | 49b02f1e6a54e4397720f84e95386e7eabb4ca71 | code | 2291 |
@testset "lexicon plain text IO" begin
    # test basic lexicon reading
    @testset "basic lexicon reading" begin
        lex = Lexicon("res/lexicon-0.txt")
        for elem in ["word", "tagless", "target", "lexicon", "pencil", "pen", "manytags"]
            @test elem in lex
        end
        for elem in ["word", "target", "pencil", "pen"]
            @test elem in tagged_with(lex, "tag2")
        end
        for elem in ["tagless", "lexicon", "manytags"]
            @test !(elem in tagged_with(lex, "tag2"))
        end
        for tag in ["tag1", "tag2", "meta", "unique", "tag", "another", "more", "evenmore", "lots",
                    "toomany"]
            @test tag in lex.tags
        end
        for tag in ["tag", "another", "more", "evenmore", "lots", "toomany"]
            @test tag in lex["manytags"]
        end
    end
    # words and tags with special characters
    @testset "reading lexicons with special chars" begin
        lex = Lexicon("res/lexicon-1.txt")
        for elem in ["with space", "with,comma", "multipart,sentence with both", "word"]
            @test elem in lex
        end
        for tag in ["symbol", "whitespace"]
            @test tag in lex["multipart,sentence with both"]
        end
        @test "spaced tag" in lex["word"]
        @test !("spaced" in lex["word"])
        @test !("tag" in lex["word"])
    end
    # writing and reading a lexicon from file
    @testset "lexicon writing and reading" begin
        lex = Lexicon()
        push!(lex, "command", "exception", "ide", tags=("computer",))
        push!(lex, "don't know where this word comes from")
        push!(lex, "pencil", tags=("stationary",))
        push!(lex, "two category item", tags=("category1", "category2"))
        file = IOBuffer()
        println(file, lex)
        # Rewind the buffer before reading it back; after `println` the buffer's
        # position is at the end, so reading would otherwise see nothing.
        seekstart(file)
        read_lex = Lexicon(file)
        # Assert against the round-tripped lexicon `read_lex` (the original
        # asserted against `lex`, which never exercised the read path at all).
        for elem in ["command", "exception", "ide", "pencil"]
            @test elem in read_lex
        end
        for tag in ["category1", "category2"]
            @test tag in read_lex["two category item"]
        end
        @test "stationary" in read_lex["pencil"]
        for elem in ["command", "exception", "ide"]
            @test "computer" in read_lex[elem]
        end
    end
end
# TODO: placeholder testsets — no assertions have been written for these yet.
@testset "lexicon IO (other formats)" begin
end
@testset "coverage under lexicon" begin
end
| CJKFrequencies | https://github.com/JuliaCJK/CJKFrequencies.jl.git |
|
[
"MIT"
] | 0.2.0 | 49b02f1e6a54e4397720f84e95386e7eabb4ca71 | code | 168 | using CJKFrequencies
using Test
# Top-level test entry point: run every test file under a single root testset
# so failures are aggregated into one summary.
@testset "all tests" begin
    include("charfreq_tests.jl")
    include("freq_datasets_tests.jl")
    include("lexicon_tests.jl")
end
| CJKFrequencies | https://github.com/JuliaCJK/CJKFrequencies.jl.git |
|
[
"MIT"
] | 0.2.0 | 49b02f1e6a54e4397720f84e95386e7eabb4ca71 | docs | 969 | # CJKFrequencies.jl

[](https://tmthyln.github.io/CJKFrequencies.jl/latest/)

A Julia package that provides some convenience functions for working with character/term frequencies of CJK language corpora (currently only implemented for specific Chinese frequency datasets, but see the docs for how to add more).
See the [documentation](https://tmthyln.github.io/CJKFrequencies.jl/latest/) for more details.
# Licensing
The package is provided under the MIT License, but the associated data used to load different character frequencies are licensed under various other licenses, depending on the source of the data. See the documentation for details on licensing for each data set.
| CJKFrequencies | https://github.com/JuliaCJK/CJKFrequencies.jl.git |
|
[
"MIT"
] | 0.2.0 | 49b02f1e6a54e4397720f84e95386e7eabb4ca71 | docs | 562 | # API Reference
A character frequency can be computed or loaded via the `charfreq` function, either from some text or a predefined corpus.
```@docs
charfreq
```
## Supported Predefined Character Frequency Datasets
The name of a Chinese character frequency dataset's `struct` is prefixed with either `Traditional` or `Simplified`, depending on whether the dataset is based on a traditional or simplified text corpus.
```@docs
SimplifiedLCMC
SimplifiedJunDa
```
Other data sets are planned to be added. To add a data set to this API, see the [Developer Docs](@ref) page.
| CJKFrequencies | https://github.com/JuliaCJK/CJKFrequencies.jl.git |
|
[
"MIT"
] | 0.2.0 | 49b02f1e6a54e4397720f84e95386e7eabb4ca71 | docs | 578 | # Developer Docs
To add a new character frequency dataset using this API, a method just needs to be added to the `charfreq` function. It should have the signature
```julia
function charfreq(cf::CustomDataSetStruct)::CharacterFrequency end
```
where the `CustomDataSetStruct` is any struct that you define for that particular character frequency data set. If needed, the constructor for the struct should take any arguments (e.g. see [`SimplifiedLCMC`](@ref) for an example with arguments).
The return value should have type `CharacterFrequency` (`Accumulator{String, Int}`).
| CJKFrequencies | https://github.com/JuliaCJK/CJKFrequencies.jl.git |
|
[
"MIT"
] | 0.2.0 | 49b02f1e6a54e4397720f84e95386e7eabb4ca71 | docs | 451 | # CJKFrequencies.jl Documentation
This package primarily provides the function `charfreq` for computing and loading character frequencies of CJK-like languages.
Note that while the source code for this package is MIT-licensed, some of the character frequency datasets are not. The licensing/copyright information for each dataset is listed under the respective `struct` on [Supported Predefined Character Frequency Datasets](@ref).
```@contents
```
| CJKFrequencies | https://github.com/JuliaCJK/CJKFrequencies.jl.git |
|
[
"MIT"
] | 2.0.2 | 0f7b398a74a9fd40e7286f4ae352ba959fe52151 | code | 1582 | using Pkg, Downloads, TreeArchival, SHA, Base.BinaryPlatforms
# Download static qemu-user binaries (from the dbhi/qus project) for every
# (host arch, target arch) pair, and bind each tarball into this repository's
# Artifacts.toml as a lazy artifact named `qemu-<target_arch>`.
root_url = "https://github.com/dbhi/qus/releases/download/v0.0.11-v7.1%2Bdfsg-2--bpo11%2B3"
# Maps our host architecture names to the docker-style names used in the
# upstream release tarball filenames.
qemu_host_maps = (
    "x86_64" => "amd64",
    "aarch64" => "arm64v8",
    "armv7l" => "arm32v7",
    "ppc64le" => "ppc64le",
)
# Target architectures we want to be able to emulate.
qemu_target_arch_list = [
    "x86_64",
    "i386",
    "aarch64",
    "arm",
    "ppc64le",
]
for (host_arch, tarball_arch) in qemu_host_maps
    for target_arch in qemu_target_arch_list
        # First, download the tarball
        mktempdir() do dir
            url = "$(root_url)/qemu-$(target_arch)-static_$(tarball_arch).tgz"
            file_path = joinpath(dir, basename(url))
            Downloads.download(url, file_path)
            # Get the tarball and tree hashes
            tarball_hash = bytes2hex(open(SHA.sha256, file_path))
            tree_hash = Base.SHA1(TreeArchival.treehash(file_path))
            artifacts_toml = Pkg.Artifacts.find_artifacts_toml(dirname(@__DIR__))
            # Because this is technically a static executable, we drop the implicit `libc` constraint
            # so that it matches both `glibc` and `musl` hosts:
            host_platform = Platform(host_arch, "linux")
            delete!(tags(host_platform), "libc")
            Pkg.Artifacts.bind_artifact!(
                artifacts_toml,
                "qemu-$(target_arch)",
                tree_hash;
                platform=host_platform,
                download_info=[(url, tarball_hash)],
                lazy=true,
                force=true,
            )
        end
    end
end
| Sandbox | https://github.com/staticfloat/Sandbox.jl.git |
|
[
"MIT"
] | 2.0.2 | 0f7b398a74a9fd40e7286f4ae352ba959fe52151 | code | 1377 | #!/usr/bin/env julia
using Sandbox, Pkg.Artifacts, Scratch, SHA, ghr_jll
# Build a docker image from a local Dockerfile directory, export its rootfs into
# an artifact, upload the archived artifact to a GitHub release, and bind it
# into this repository's Artifacts.toml.
if isempty(ARGS)
    println("Usage: julia --project build_docker_image.jl <dir>")
    println("  Where <dir> is something like `debian-julia-python3`")
    # Nothing to build without an argument; bail out instead of hitting a
    # BoundsError on `ARGS[1]` below.
    exit(1)
end
image_name = ARGS[1]
run(`docker build -t $(image_name) $(image_name)`)
artifact_hash = create_artifact() do dir
    @info("Building $(image_name)")
    Sandbox.export_docker_image(image_name, dir; verbose=true)
    @info("Hashing")
end
# Write out to a file
tarball_path = joinpath(@get_scratch!("archived"), "$(image_name).tar.gz")
@info("Archiving out to $(tarball_path)")
archive_artifact(artifact_hash, tarball_path)
# Hash the tarball
@info("Hashing tarball")
tarball_hash = open(io -> bytes2hex(sha256(io)), tarball_path)
# Upload to `staticfloat/Sandbox.jl`, create a tag based on this docker image
tag_name = "$(image_name)-$(bytes2hex(artifact_hash.bytes[end-3:end]))"
@info("Uploading to staticfloat/Sandbox.jl@$(tag_name)")
run(`$(ghr_jll.ghr()) -replace $(tag_name) $(tarball_path)`)
# Bind it into `Artifacts.toml`
tarball_url = "https://github.com/staticfloat/Sandbox.jl/releases/download/$(tag_name)/$(basename(tarball_path))"
bind_artifact!(
    joinpath(dirname(@__DIR__), "Artifacts.toml"),
    "$(image_name)-rootfs",
    artifact_hash;
    download_info=[(tarball_url, tarball_hash)],
    lazy=true,
    force=true,
)
| Sandbox | https://github.com/staticfloat/Sandbox.jl.git |
|
[
"MIT"
] | 2.0.2 | 0f7b398a74a9fd40e7286f4ae352ba959fe52151 | code | 2343 | using Pkg.Artifacts, SHA, Scratch, ghr_jll, Base.BinaryPlatforms, Downloads, TreeArchival
## Download and create our `multiarch-testing` artifact, which contains HelloWorldC_jll for all our architectures.
HWC_version = v"1.3.0+0"
# Every (arch, libc) pair we want a `hello_world` binary for.
HWC_platforms = (
    Platform("x86_64", "linux"; libc="glibc"),
    Platform("x86_64", "linux"; libc="musl"),
    Platform("i686", "linux"; libc="glibc"),
    Platform("i686", "linux"; libc="musl"),
    Platform("aarch64", "linux"; libc="glibc"),
    Platform("aarch64", "linux"; libc="musl"),
    Platform("armv7l", "linux"; libc="glibc"),
    Platform("armv7l", "linux"; libc="musl"),
    Platform("powerpc64le", "linux"; libc="glibc"),
    # We don't have this one yet
    #Platform("powerpc64le", "linux"; libc="musl"),
)
artifact_hash = create_artifact() do dir
    for platform in HWC_platforms
        url = "https://github.com/JuliaBinaryWrappers/HelloWorldC_jll.jl/releases/download/HelloWorldC-v$(HWC_version)/HelloWorldC.v$(HWC_version.major).$(HWC_version.minor).$(HWC_version.patch).$(triplet(platform)).tar.gz"
        mktempdir() do temp_dir
            tarball_path = joinpath(temp_dir, basename(url))
            Downloads.download(url, tarball_path)
            TreeArchival.unarchive(tarball_path, joinpath(temp_dir, "out"))
            try
                mv(joinpath(temp_dir, "out", "bin", "hello_world"), joinpath(dir, "hello_world.$(triplet(platform))"))
            catch e
                # Best-effort: keep processing the remaining platforms, but
                # surface the failure instead of swallowing it silently.
                @warn("Unable to extract hello_world for $(triplet(platform))", exception=e)
            end
        end
    end
end
@info("Archiving")
tarball_path = joinpath(@get_scratch!("archived"), "multiarch-testing.tar.gz")
archive_artifact(artifact_hash, tarball_path)
# Hash the tarball
@info("Hashing tarball")
tarball_hash = open(io -> bytes2hex(sha256(io)), tarball_path)
# Upload it to `staticfloat/Sandbox.jl`
tag_name = "multiarch-testing-$(bytes2hex(artifact_hash.bytes[end-3:end]))"
@info("Uploading to staticfloat/Sandbox.jl@$(tag_name)")
run(`$(ghr_jll.ghr()) -replace $(tag_name) $(tarball_path)`)
tarball_url = "https://github.com/staticfloat/Sandbox.jl/releases/download/$(tag_name)/$(basename(tarball_path))"
# Bind it into our Artifacts.toml (advertising support for both glibc and musl)
bind_artifact!(
    joinpath(@__DIR__, "..", "Artifacts.toml"),
    "multiarch-testing",
    artifact_hash;
    download_info=[(tarball_url, tarball_hash)],
    force=true,
    lazy=true,
)
| Sandbox | https://github.com/staticfloat/Sandbox.jl.git |
|
[
"MIT"
] | 2.0.2 | 0f7b398a74a9fd40e7286f4ae352ba959fe52151 | code | 2775 | #!/usr/bin/env julia
# This is an example invocation of `debootstrap` to generate a Debian/Ubuntu-based rootfs
using Scratch, Tar, Pkg, Pkg.Artifacts, ghr_jll, SHA
# Requires a host `debootstrap` installation (and sudo privileges below).
if Sys.which("debootstrap") === nothing
    error("Must install `debootstrap`!")
end
# Utility functions
getuid() = ccall(:getuid, Cint, ())
getgid() = ccall(:getgid, Cint, ())
# Build a minimal Debian rootfs into a fresh artifact directory.
artifact_hash = create_artifact() do rootfs
    release = "buster"
    @info("Running debootstrap")
    run(`sudo debootstrap --variant=minbase --include=locales $(release) "$(rootfs)"`)
    # Remove special `dev` files
    @info("Cleaning up `/dev`")
    for f in readdir(joinpath(rootfs, "dev"); join=true)
        # Keep the symlinks around (such as `/dev/fd`), as they're useful
        if !islink(f)
            run(`sudo rm -rf "$(f)"`)
        end
    end
    # take ownership of the entire rootfs
    @info("Chown'ing rootfs")
    run(`sudo chown $(getuid()):$(getgid()) -R "$(rootfs)"`)
    # Write out a reasonable default resolv.conf
    open(joinpath(rootfs, "etc", "resolv.conf"), write=true) do io
        write(io, """
        nameserver 1.1.1.1
        nameserver 8.8.8.8
        nameserver 8.8.4.4
        nameserver 4.4.4.4
        """)
    end
    # Remove `_apt` user so that `apt` doesn't try to `setgroups()`
    @info("Removing `_apt` user")
    open(joinpath(rootfs, "etc", "passwd"), write=true, read=true) do io
        filtered_lines = filter(l -> !startswith(l, "_apt:"), readlines(io))
        truncate(io, 0)
        seek(io, 0)
        for l in filtered_lines
            println(io, l)
        end
    end
    # Set up the one true locale
    @info("Setting up UTF-8 locale")
    open(joinpath(rootfs, "etc", "locale.gen"), "a") do io
        println(io, "en_US.UTF-8 UTF-8")
    end
    @info("Regenerating locale")
    run(`sudo chroot --userspec=$(getuid()):$(getgid()) $(rootfs) locale-gen`)
    @info("Done!")
end
# Archive it into a `.tar.gz` file
@info("Archiving")
tarball_path = joinpath(@get_scratch!("archived"), "debian_minimal.tar.gz")
archive_artifact(artifact_hash, tarball_path)
# Hash the tarball
@info("Hashing tarball")
tarball_hash = open(io -> bytes2hex(sha256(io)), tarball_path)
# Upload it to `staticfloat/Sandbox.jl`
tag_name = "debian-minimal-$(bytes2hex(artifact_hash.bytes[end-3:end]))"
@info("Uploading to staticfloat/Sandbox.jl@$(tag_name)")
run(`$(ghr_jll.ghr()) -replace $(tag_name) $(tarball_path)`)
# Bind this artifact into our Artifacts.toml
tarball_url = "https://github.com/staticfloat/Sandbox.jl/releases/download/$(tag_name)/$(basename(tarball_path))"
bind_artifact!(
    joinpath(dirname(@__DIR__), "Artifacts.toml"),
    "debian-minimal-rootfs",
    artifact_hash;
    download_info=[(tarball_url, tarball_hash)],
    lazy=true,
    force=true,
)
| Sandbox | https://github.com/staticfloat/Sandbox.jl.git |
|
[
"MIT"
] | 2.0.2 | 0f7b398a74a9fd40e7286f4ae352ba959fe52151 | code | 653 | using Pkg, Scratch, Preferences
# Compile `sandbox.c` into a scratch space owned by the Sandbox package, then
# point UserNSSandbox_jll's preferences at the locally-built binaries instead
# of the default artifact-provided ones.
sandbox_uuid = Base.UUID("9307e30f-c43e-9ca7-d17c-c2dc59df670d")
build_dir = get_scratch!(sandbox_uuid, "local_sandbox")
run(`make -C $(@__DIR__) -j$(Sys.CPU_THREADS) bindir=$(build_dir)`)
# Tell UserNSSandbox_jll to load our locally-built `sandbox` binaries; both
# preferences target the same UUID, so they can be written in a single call.
jll_uuid = Base.UUID("b88861f7-1d72-59dd-91e7-a8cc876a4984")
set_preferences!(
    jll_uuid,
    "sandbox_path" => joinpath(build_dir, "userns_sandbox"),
    "overlay_probe_path" => joinpath(build_dir, "userns_overlay_probe");
    force=true,
)
| Sandbox | https://github.com/staticfloat/Sandbox.jl.git |
|
[
"MIT"
] | 2.0.2 | 0f7b398a74a9fd40e7286f4ae352ba959fe52151 | code | 209 | using Documenter, Sandbox
# Build the Sandbox.jl documentation and deploy it to GitHub Pages.
makedocs(;
    sitename = "Sandbox.jl",
    modules = [Sandbox],
)
deploydocs(;
    repo = "github.com/staticfloat/Sandbox.jl.git",
    devbranch = "main",
    push_preview = true,
)
| Sandbox | https://github.com/staticfloat/Sandbox.jl.git |
|
[
"MIT"
] | 2.0.2 | 0f7b398a74a9fd40e7286f4ae352ba959fe52151 | code | 13048 | using Random, Tar
import Tar_jll
# Executor that runs sandboxed commands through `docker run` (or `podman run`).
# `label` uniquely tags every container/image this executor creates, so that
# `cleanup()` can prune exactly those resources.
# `privileges` selects the container security mode: one of `:privileged`,
# `:no_new_privileges` or `:unprivileged` (validated in `build_executor_command`).
Base.@kwdef mutable struct DockerExecutor <: SandboxExecutor
    label::String = Random.randstring(10)
    privileges::Symbol = :privileged
end
"""
    docker_exe()

Return the container-runtime executable to use.  Honors the
`SANDBOX_DOCKER_EXE` environment variable / `docker_exe` preference (via
`load_env_pref`); otherwise falls back to the first of `docker`/`podman`
found on the `PATH`, or `nothing` if neither exists.
"""
function docker_exe()
    fallback = something(Sys.which("docker"), Sys.which("podman"), Some(nothing))
    return load_env_pref("SANDBOX_DOCKER_EXE", "docker_exe", fallback)
end
# Prune every container/image this executor created, identified by its unique
# label; the `docker system prune` exit status is ignored via `success`.
function cleanup(exe::DockerExecutor)
    success(`$(docker_exe()) system prune --force --filter=label=$(docker_image_label(exe))`)
end
# Short human-readable representation.
Base.show(io::IO, exe::DockerExecutor) = write(io, "Docker Executor")
# Availability check for the Docker backend: requires a `docker`/`podman`
# binary, a working `docker ps` (i.e. daemon access/permissions), and a
# successful end-to-end `probe_executor` smoke test.
function executor_available(::Type{DockerExecutor}; verbose::Bool = false)
    # don't even try to exec if it doesn't exist
    if docker_exe() === nothing
        if verbose
            @info("No `docker` or `podman` command found; DockerExecutor unavailable")
        end
        return false
    end
    # Return true if we can `docker ps`; if we can't, then there's probably a permissions issue
    if !success(`$(docker_exe()) ps`)
        if verbose
            @warn("Unable to run `$(docker_exe()) ps`; perhaps you're not in the `docker` group?")
        end
        return false
    end
    return with_executor(DockerExecutor) do exe
        return probe_executor(exe; verbose)
    end
end
# Path of the TOML file that maps docker image names to the newest directory
# ctime observed when that image was last built (kept in a scratch space).
timestamps_path() = joinpath(@get_scratch!("docker_timestamp_hashes"), "path_timestamps.toml")
# Load the image-name => timestamp table; returns an empty Dict if the file is
# missing or unparseable (a corrupt file merely forces an image rebuild).
function load_timestamps()
    path = timestamps_path()
    if !isfile(path)
        return Dict()
    end
    try
        return TOML.parsefile(path)
    catch e
        @error("couldn't load $(path)", exception=e)
        return Dict()
    end
end
"""
    save_timestamp(image_name::String, timestamp::Float64)

Record `timestamp` for `image_name` in the timestamps TOML file used by
`should_build_docker_image` to decide whether a rebuild is needed.
"""
function save_timestamp(image_name::String, timestamp::Float64)
    # Serialize the updated table to a string first, so a failure while
    # rendering TOML cannot leave a half-written file behind.
    # (The original had a stray chained `entry =` binding here, which was
    # never used; it has been removed.)
    timestamps_toml = sprint() do io
        timestamps = load_timestamps()
        timestamps[image_name] = timestamp
        TOML.print(io, timestamps)
    end
    open(timestamps_path(), write=true) do io
        write(io, timestamps_toml)
    end
end
# Deterministic image name for a set of overlayed path mappings: a XOR-folded
# CRC32c over all sandbox paths and host paths, suffixed with the uid/gid the
# image was built for.  (XOR is commutative, so Dict iteration order is moot.)
function docker_image_name(paths::Dict{String,String}, uid::Cint, gid::Cint)
    crcs = [Base._crc32c(s) for strs in (keys(paths), values(paths)) for s in strs]
    checksum = foldl(⊻, crcs)
    return string("sandbox_rootfs:", string(checksum, base=16), "-", uid, "-", gid)
end
# The `--label` value used to tag containers/images created by `exe`, so that
# `cleanup()` can later find and prune them.
docker_image_label(exe::DockerExecutor) = string("org.julialang.sandbox.jl=", exe.label)
# Decide whether the rootfs image must be (re)built: yes if the image does not
# exist in the docker daemon, or if any overlayed source directory has changed
# since the timestamp recorded at the last build.
function should_build_docker_image(paths::Dict{String,String}, uid::Cint, gid::Cint)
    # If the image doesn't exist at all, always return true
    image_name = docker_image_name(paths, uid, gid)
    if !success(`$(docker_exe()) image inspect $(image_name)`)
        return true
    end
    # If this image has been built before, compare its historical timestamp to the current one
    prev_ctime = get(load_timestamps(), image_name, 0.0)
    return any(max_directory_ctime(path) > prev_ctime for path in values(paths))
end
"""
    build_docker_image(mounts::Dict, uid::Cint, gid::Cint; verbose::Bool = false)

Docker doesn't like volume mounts within volume mounts, like we do with `sandbox`.
So we do things "the docker way", where we construct a rootfs docker image, then mount
things on top of that, with no recursive mounting. We cut down on unnecessary work
somewhat by quick-scanning the directory for changes and only rebuilding if changes
are detected.

Returns the name of the built (or cached) docker image.
"""
function build_docker_image(mounts::Dict, uid::Cint, gid::Cint; verbose::Bool = false)
    # Only `Overlayed` mounts become part of the base image; other mount types
    # are passed to `docker run -v` directly in `build_executor_command`.
    overlayed_paths = Dict(path => m.host_path for (path, m) in mounts if m.type == MountType.Overlayed)
    image_name = docker_image_name(overlayed_paths, uid, gid)
    if should_build_docker_image(overlayed_paths, uid, gid)
        max_ctime = maximum(max_directory_ctime(path) for path in values(overlayed_paths))
        if verbose
            @info("Building docker image $(image_name) with max timestamp 0x$(string(round(UInt64, max_ctime), base=16))")
        end
        # We're going to tar up all Overlayed mounts, using `--transform` to convert our host paths
        # to the required destination paths.
        tar_cmds = String[]
        append!(tar_cmds, ["--transform=s&$(host[2:end])&$(dst[2:end])&" for (dst, host) in overlayed_paths])
        append!(tar_cmds, [host for (_, host) in overlayed_paths])
        # Build the docker image
        open(`$(docker_exe()) import - $(image_name)`, "w", verbose ? stdout : devnull) do io
            # We need to record permissions, and therefore we cannot use Tar.jl.
            # Some systems (e.g. macOS) ship with a BSD tar that does not support the
            # `--owner` and `--group` command-line options. Therefore, if Tar_jll is
            # available, we use the GNU tar provided by Tar_jll. If Tar_jll is not available,
            # we fall back to the system tar.
            tar = Tar_jll.is_available() ? Tar_jll.tar() : `tar`
            run(pipeline(
                `$(tar) -c --owner=$(uid) --group=$(gid) $(tar_cmds)`,
                stdout=io,
                stderr=verbose ? stderr : devnull,
            ))
        end
        # Record that we built it
        save_timestamp(image_name, max_ctime)
    end
    return image_name
end
"""
    commit_previous_run(exe::DockerExecutor, image_name::String)

If any previous container run carries this executor's label, commit the most
recent one as a new image (`sandbox_rootfs_persist:<id>`) and return that
image's name; otherwise return `image_name` unchanged.  This is how
`persist=true` carries filesystem state across runs.
"""
function commit_previous_run(exe::DockerExecutor, image_name::String)
    ids = split(readchomp(`$(docker_exe()) ps -a --filter label=$(docker_image_label(exe)) --format "{{.ID}}"`))
    if isempty(ids)
        return image_name
    end
    # We'll take the first docker container ID that we get, as it's the most recent, and commit it.
    image_name = "sandbox_rootfs_persist:$(first(ids))"
    run(`$(docker_exe()) commit $(first(ids)) $(image_name)`)
    return image_name
end
"""
    build_executor_command(exe::DockerExecutor, config::SandboxConfig, user_cmd::Cmd)

Construct the full `docker run` (or `podman run`) `Cmd` that executes
`user_cmd` inside the sandbox described by `config`, building (or re-using)
the rootfs image as needed.
"""
function build_executor_command(exe::DockerExecutor, config::SandboxConfig, user_cmd::Cmd)
    # Build the docker image that corresponds to this rootfs
    image_name = build_docker_image(config.mounts, config.uid, config.gid; verbose=config.verbose)
    if config.persist
        # If this is a persistent run, check to see if any previous runs have happened from
        # this executor, and if they have, we'll commit that previous run as a new image and
        # use it instead of the "base" image.
        image_name = commit_previous_run(exe, image_name)
    end
    # Begin building `docker` args
    if exe.privileges === :privileged # this is the default
        # pros: allows you to do nested execution. e.g. the ability to run `Sandbox` inside `Sandbox`
        # cons: may allow processes inside the Docker container to access secure environment variables of processes outside the container
        privilege_args = String["--privileged"]
    elseif exe.privileges === :no_new_privileges
        # pros: may prevent privilege escalation attempts
        # cons: you won't be able to do nested execution
        privilege_args = String["--security-opt", "no-new-privileges"]
    elseif exe.privileges === :unprivileged
        # cons: you won't be able to do nested execution; privilege escalation may still work
        privilege_args = String[]
    else
        throw(ArgumentError("invalid value for exe.privileges: $(exe.privileges)"))
    end
    cmd_string = String[docker_exe(), "run", privilege_args..., "-i", "--label", docker_image_label(exe)]
    # If we're doing a fully-interactive session, tell it to allocate a pseudo-TTY
    if all(isa.((config.stdin, config.stdout, config.stderr), Base.TTY))
        push!(cmd_string, "-t")
    end
    # Start in the right directory
    append!(cmd_string, ["-w", config.pwd])
    # Add in bind-mount mappings.  Overlayed mounts are skipped because they are
    # baked into the base image by `build_docker_image`.
    # (The original declared an `overlay_mappings` vector here that was never
    # used; it has been removed.)
    for (sandbox_path, mount_info) in config.mounts
        if sandbox_path == "/"
            continue
        end
        local mount_type_str
        if mount_info.type == MountType.ReadOnly
            mount_type_str = ":ro"
        elseif mount_info.type == MountType.Overlayed
            continue
        elseif mount_info.type == MountType.ReadWrite
            mount_type_str = ""
        else
            throw(ArgumentError("Unknown mount type: $(mount_info.type)"))
        end
        append!(cmd_string, ["-v", "$(mount_info.host_path):$(sandbox_path)$(mount_type_str)"])
    end
    # Apply environment mappings, first from `config`, next from `user_cmd`.
    for (k, v) in config.env
        append!(cmd_string, ["-e", "$(k)=$(v)"])
    end
    if user_cmd.env !== nothing
        for pair in user_cmd.env
            append!(cmd_string, ["-e", pair])
        end
    end
    # Add in entrypoint, if it is set
    if config.entrypoint !== nothing
        append!(cmd_string, ["--entrypoint", config.entrypoint])
    end
    if config.hostname !== nothing
        append!(cmd_string, ["--hostname", config.hostname])
    end
    # For each platform requested by `multiarch`, ensure its matching interpreter is registered,
    # but only if we're on Linux. If we're on some other platform, like macOS where Docker is
    # implemented with a virtual machine, we just trust the docker folks to have set up the
    # relevant `binfmt_misc` mappings properly.
    if Sys.islinux()
        register_requested_formats!(config.multiarch_formats; verbose=config.verbose)
    end
    # Set the user and group
    append!(cmd_string, ["--user", "$(config.uid):$(config.gid)"])
    # Finally, append the docker image name and the user-requested command string
    push!(cmd_string, image_name)
    append!(cmd_string, user_cmd.exec)
    docker_cmd = Cmd(cmd_string)
    # If the user has asked that this command be allowed to fail silently, pass that on
    if user_cmd.ignorestatus
        docker_cmd = ignorestatus(docker_cmd)
    end
    return docker_cmd
end
# Docker image names may contain `:`, which is not valid in a scratch-space
# key; map every colon to a dash.
sanitize_key(name) = map(c -> c == ':' ? '-' : c, name)
"""
    export_docker_image(image::String,
                        output_dir::String = <default scratch location>;
                        verbose::Bool = false,
                        force::Bool = false)

Exports the given docker image name to the requested output directory. Useful
for pulling down a known good rootfs image from Docker Hub, for future use by
Sandbox executors. If `force` is set to true, will overwrite a pre-existing
directory, otherwise will silently return.
"""
function export_docker_image(image_name::String,
                             output_dir::String = @get_scratch!("docker-$(sanitize_key(image_name))");
                             force::Bool = false,
                             verbose::Bool = false)
    if ispath(output_dir) && !isempty(readdir(output_dir))
        if force
            # Clear out the stale contents before re-exporting.  (`rm`, not the
            # undefined `rmdir` the original called, which does not exist in Base.)
            rm(output_dir; force=true, recursive=true)
        else
            if verbose
                @warn("Will not overwrite pre-existing directory $(output_dir)")
            end
            return output_dir
        end
    end
    # Get a container ID ready to be passed to `docker export`
    container_id = readchomp(`$(docker_exe()) create $(image_name) /bin/true`)
    # Get the ID of that container (since we can't export by label, sadly)
    if isempty(container_id)
        if verbose
            @warn("Unable to create container based on $(image_name)")
        end
        return nothing
    end
    # Export the container filesystem to a directory, always removing the
    # temporary container afterwards.
    try
        mkpath(output_dir)
        open(`$(docker_exe()) export $(container_id)`) do tar_io
            Tar.extract(tar_io, output_dir) do hdr
                # Skip known troublesome files
                return hdr.type ∉ (:chardev,)
            end
        end
    finally
        run(`$(docker_exe()) rm -f $(container_id)`)
    end
    return output_dir
end
"""
    pull_docker_image(image::String,
                      output_dir::String = <default scratch location>;
                      platform::String = "",
                      verbose::Bool = false,
                      force::Bool = false)

Pulls and saves the given docker image name to the requested output directory.
Useful for pulling down a known good rootfs image from Docker Hub, for future use
by Sandbox executors. If `force` is set to true, will overwrite a pre-existing
directory, otherwise will silently return. Optionally specify the platform of the
image with `platform`.
"""
function pull_docker_image(image_name::String,
                           output_dir::String = @get_scratch!("docker-$(sanitize_key(image_name))");
                           platform::Union{String,Nothing} = nothing,
                           force::Bool = false,
                           verbose::Bool = false)
    if ispath(output_dir) && !isempty(readdir(output_dir))
        if force
            # Clear out the stale contents before re-pulling.  (`rm`, not the
            # undefined `rmdir` the original called, which does not exist in Base.)
            rm(output_dir; force=true, recursive=true)
        else
            if verbose
                @warn("Will not overwrite pre-existing directory $(output_dir)")
            end
            return output_dir
        end
    end
    # Pull the latest version of the image
    try
        p = platform === nothing ? `` : `--platform $(platform)`
        run(`$(docker_exe()) pull $(p) $(image_name)`)
    catch e
        @warn("Cannot pull", image_name, e)
        return nothing
    end
    # Once the image is pulled, export it to given output directory
    return export_docker_image(image_name, output_dir; force, verbose)
end
| Sandbox | https://github.com/staticfloat/Sandbox.jl.git |
|
[
"MIT"
] | 2.0.2 | 0f7b398a74a9fd40e7286f4ae352ba959fe52151 | code | 8309 | module Sandbox
using Preferences, Scratch, LazyArtifacts, TOML, Libdl
import Base: run, success
export SandboxExecutor, DockerExecutor, UserNamespacesExecutor, SandboxConfig,
preferred_executor, executor_available, probe_executor, run, cleanup, with_executor
using Base.BinaryPlatforms
# Include some utilities for things like file manipulation, uname() parsing, etc...
include("utils.jl")
"""
    SandboxExecutor

This represents the base type for all execution backends within this package.
Valid concrete subtypes must implement at least the following methods:

* `T()`: no-argument constructor to ready an execution engine with all defaults.

* `executor_available(::DataType{T})`: Checks whether executor type `T` is available
  on this system.  For example, `UserNamespacesExecutor`s are only available on
  Linux, and even then only on certain kernels.  Availability checks may run a
  program to determine whether that executor is actually available.

* `build_executor_command(exe::T, config::SandboxConfig, cmd::Cmd)`: Builds the
  `Cmd` object that, when run, executes the user's desired command within the given
  sandbox.  The `config` object contains all necessary metadata such as shard
  mappings, environment variables, `stdin`/`stdout`/`stderr` redirection, etc...

* `cleanup(exe::T)`: Cleans up any persistent data storage that this executor may
  have built up over the course of its execution.

Note that while you can manually construct and cleanup an executor, it is recommended
that users instead make use of the `with_executor()` convenience function:

    with_executor(UnprivilegedUserNamespacesExecutor) do exe
        run(exe, config, ...)
    end
"""
abstract type SandboxExecutor; end
# Utilities to help with reading `binfmt_misc` entries in `/proc`
include("binfmt_misc.jl")
# Our SandboxConfig object, defining the environment sandboxed executions happen within
include("SandboxConfig.jl")
# Load the Docker executor
include("Docker.jl")
# Load the UserNamespace executor
include("UserNamespaces.jl")
# All known executor types, in the order we prefer to try them in
# `select_executor`.  Declared `const` so the global binding is concretely
# typed (the vector's contents remain mutable).
const all_executors = Type{<:SandboxExecutor}[
    # We always prefer the UserNamespaces executor, if we can use it,
    # and the unprivileged one most of all. Only after that do we try `docker`.
    UnprivilegedUserNamespacesExecutor,
    PrivilegedUserNamespacesExecutor,
    DockerExecutor,
]
"""
    select_executor(verbose::Bool)

Pick the executor type to use: honor the `FORCE_SANDBOX_MODE` environment
variable / `executor` preference when present, otherwise probe each entry of
`all_executors` in priority order and return the first available one.
Throws if no executor works on this host.
"""
function select_executor(verbose::Bool)
    # If `FORCE_SANDBOX_MODE` is set, we're a nested Sandbox.jl invocation, and we should always use whatever it says
    executor = load_env_pref("FORCE_SANDBOX_MODE", "executor", nothing)
    if executor !== nothing
        executor = lowercase(executor)
        if executor ∈ ("unprivilegedusernamespacesexecutor", "unprivileged", "userns")
            return UnprivilegedUserNamespacesExecutor
        elseif executor ∈ ("privilegedusernamespacesexecutor", "privileged")
            return PrivilegedUserNamespacesExecutor
        elseif executor ∈ ("dockerexecutor", "docker")
            return DockerExecutor
        end
    end
    # Otherwise, just try them in priority order
    for executor in all_executors
        if executor_available(executor; verbose)
            return executor
        end
    end
    error("Could not find any available executors for $(triplet(HostPlatform()))!")
end
# Cached result of `select_executor`, guarded by a lock so concurrent first
# calls probe only once.  NOTE(review): `_preferred_executor` is a non-const
# global; all reads/writes go through the lock below.
_preferred_executor = nothing
const _preferred_executor_lock = ReentrantLock()
# Return (computing and caching on first use) the preferred executor type.
function preferred_executor(;verbose::Bool = false)
    lock(_preferred_executor_lock) do
        # If we've already asked this question, return the old answer
        global _preferred_executor
        if _preferred_executor === nothing
            _preferred_executor = select_executor(verbose)
        end
        return _preferred_executor
    end
end
# Helper function for warning about privileged execution trying to invoke `sudo`
# (shown at most once per session).  NOTE(review): the name is misspelled
# ("priviledged"); renaming would also require updating the call site below.
function warn_priviledged(::PrivilegedUserNamespacesExecutor)
    @info("Running privileged container via `sudo`, may ask for your password:", maxlog=1)
    return nothing
end
# All other executors need no warning.
warn_priviledged(::SandboxExecutor) = nothing
# Generate identical `run` and `success` methods for sandboxed execution; both
# build the executor-specific command, wire up redirection, and dispatch to the
# corresponding `Base` function.
for f in (:run, :success)
    @eval begin
        function $f(exe::SandboxExecutor, config::SandboxConfig, user_cmd::Cmd)
            # Because Julia 1.8+ closes IOBuffers like `stdout` and `stderr`, we create temporary
            # IOBuffers that get copied over to the persistent `stdin`/`stdout` after the run is complete.
            temp_stdout = isa(config.stdout, IOBuffer) ? IOBuffer() : config.stdout
            temp_stderr = isa(config.stderr, IOBuffer) ? IOBuffer() : config.stderr
            cmd = pipeline(build_executor_command(exe, config, user_cmd); config.stdin, stdout=temp_stdout, stderr=temp_stderr)
            if config.verbose
                @info("Running sandboxed command", user_cmd.exec)
            end
            warn_priviledged(exe)
            ret = $f(cmd)
            # If we were using temporary IOBuffers, write the result out to `config.std{out,err}`
            if isa(temp_stdout, IOBuffer)
                write(config.stdout, take!(temp_stdout))
            end
            if isa(temp_stderr, IOBuffer)
                write(config.stderr, take!(temp_stderr))
            end
            return ret
        end
    end
end
"""
    with_executor(f::Function, ::Type{<:SandboxExecutor} = preferred_executor(); kwargs...)

Construct an executor of the given type (forwarding `kwargs` to its
constructor), invoke `f(exe)` with it, and unconditionally `cleanup(exe)`
afterwards, even if `f` throws.  Returns the result of `f`.
"""
function with_executor(f::F, ::Type{T} = preferred_executor();
                       kwargs...) where {F <: Function, T <: SandboxExecutor}
    exe = T(; kwargs...)
    try
        return f(exe)
    finally
        cleanup(exe)
    end
end
"""
    probe_executor(executor::SandboxExecutor; verbose::Bool = false)

Run a small smoke test inside `executor`: mount a minimal Debian rootfs plus a
read-write scratch mount, execute a shell command that prints a sentinel to
stdout and writes a sentinel file into the read-write mount, then verify both.
Returns `true` only if every check passes.
"""
function probe_executor(executor::SandboxExecutor; verbose::Bool = false)
    mktempdir() do tmpdir
        rw_dir = joinpath(tmpdir, "rw")
        mkpath(rw_dir)
        mounts = Dict(
            "/" => MountInfo(debian_rootfs(), MountType.Overlayed),
            "/read_write" => MountInfo(rw_dir, MountType.ReadWrite),
        )
        # Do a quick test that this executor works
        inner_cmd = """
        echo 'hello julia'
        echo 'read-write mapping successful' >> /read_write/foo
        """
        cmd_stdout = IOBuffer()
        cmd_stderr = IOBuffer()
        config = SandboxConfig(
            mounts,
            Dict("PATH" => "/bin:/usr/bin");
            stdout=cmd_stdout,
            stderr=cmd_stderr,
            verbose,
        )
        user_cmd = `/bin/bash -c "$(inner_cmd)"`
        # Command should execute successfully
        user_cmd = ignorestatus(user_cmd)
        if !success(run(executor, config, user_cmd))
            if verbose
                cmd_stdout = String(take!(cmd_stdout))
                cmd_stderr = String(take!(cmd_stderr))
                @warn("Unable to run `sandbox` itself", cmd_stdout)
                println(cmd_stderr)
            end
            return false
        end
        # stdout should contain "hello julia" as its own line
        cmd_stdout = String(take!(cmd_stdout))
        stdout_lines = split(cmd_stdout, "\n")
        if !("hello julia" in stdout_lines)
            cmd_stderr = String(take!(cmd_stderr))
            stderr_lines = split(cmd_stderr, "\n")
            if verbose
                @warn(" -> Basic stdout sentinel missing!", stdout_lines, stderr_lines)
            end
            return false
        end
        # NOTE(review): the nested `joinpath` here is redundant — the outer call
        # receives a single argument and returns it unchanged.
        foo_file = joinpath(joinpath(tmpdir, "rw", "foo"))
        if !isfile(foo_file)
            if verbose
                @warn(" -> Read-write mapping sentinel file missing!")
            end
            return false
        end
        foo_file_contents = String(read(foo_file))
        if foo_file_contents != "read-write mapping successful\n"
            if verbose
                @warn(" -> Read-write mapping data corrupted", foo_file_contents)
            end
            return false
        end
        return true
    end
end
# Convenience function for other users who want to do some testing
"""
    debian_rootfs(;platform=HostPlatform())

Return the path of the minimal Debian rootfs artifact for the architecture
of `platform` (defaults to the host architecture).
"""
function debian_rootfs(;platform=HostPlatform())
    return @artifact_str("debian-minimal-rootfs-$(arch(platform))")
end
# The multiarch rootfs is truly multiarch: a single artifact serves every
# architecture, so `platform` is accepted (for API symmetry with
# `debian_rootfs()`) but ignored.
multiarch_rootfs(;platform=nothing) = artifact"multiarch-rootfs"
# Precompilation section: warm up common entry points so first use is fast.
# `f` is never executed here; it only supplies a concrete function type for
# the `with_executor` precompile signature.
let
    f(exe) = run(exe, SandboxConfig(Dict("/" => "/")), `/bin/bash -c exit`)
    precompile(select_executor, (Bool,))
    precompile(with_executor, (typeof(f),))
end
end # module
| Sandbox | https://github.com/staticfloat/Sandbox.jl.git |
|
[
"MIT"
] | 2.0.2 | 0f7b398a74a9fd40e7286f4ae352ba959fe52151 | code | 7003 | using Base.BinaryPlatforms, EnumX
# Anything acceptable as a stdio redirection target for the sandboxed process.
const AnyRedirectable = Union{Base.AbstractCmd, Base.TTY, <:IO}

# How a host path is exposed inside the sandbox.
@enumx MountType begin
    ReadWrite   # writes inside the sandbox land directly on the host path
    ReadOnly    # host data visible but immutable from inside
    Overlayed   # writable via an overlay; the host data itself is untouched
end

# A single mount mapping: the host path to mount, and with which semantics.
struct MountInfo
    host_path::String   # absolute host path (canonicalized by SandboxConfig)
    type::MountType.T
end
export MountInfo, MountType
"""
SandboxConfig(read_only_maps, read_write_maps, env)
Sandbox executors require a configuration to set up the environment properly.
- `read_only_maps`: Directories that are mapped into the sandbox as read-only mappings.
- Specified as pairs, e.g. `sandbox_path => host_path`. All paths must be absolute.
- Must always include a mapping for root, e.g. `"/" => rootfs_path`.
- `read_write_maps`: Directories that are mapped into the sandbox as read-write mappings.
- Specified as pairs, e.g. `sandbox_path => host_path`. All paths must be absolute.
- Note that some executors may not show perfect live updates; consistency is guaranteed
only after execution is finished.
- `env`: Dictionary mapping of environment variables that should be set within the sandbox.
- `entrypoint`: Executable that gets passed the actual command being run.
- This is a path within the sandbox, and must be absolute.
- Defaults to `nothing`, which causes the command to be executed directly.
- `pwd`: Set the working directory of the command that will be run.
- This is a path within the sandbox, and must be absolute.
- `persist`: Tell the executor object to persist changes made to the rootfs.
- This is a boolean value, it is up to interpretation by the executor.
- Persistence is a property of an individual executor and changes live only as long
as the executor object itself.
- You cannot transfer persistent changes from one executor to another.
- `multiarch`: Request multiarch executable support
- This is an array of `Platform` objects
- Sandbox will ensure that interpreters (such as `qemu-*-static` binaries) are
available for each platform.
- Requesting multiarch support for a platform that we don't support results in
an `ArgumentError`.
- `uid` and `gid`: Numeric user and group identifiers to spawn the sandboxed process as.
- By default, these are both `0`, signifying `root` inside the sandbox.
- `stdin`, `stdout`, `stderr`: input/output streams for the sandboxed process.
- Can be any kind of `IO`, `TTY`, `devnull`, etc...
- `hostname`: Set the hostname within the sandbox, defaults to the current hostname
- `verbose`: Set whether the sandbox construction process should be more or less verbose.
"""
struct SandboxConfig
    mounts::Dict{String,MountInfo}                 # sandbox path => host mount description
    env::Dict{String,String}                       # environment inside the sandbox
    entrypoint::Union{String,Nothing}              # optional wrapper executable (sandbox path)
    pwd::String                                    # working directory inside the sandbox
    persist::Bool                                  # keep rootfs changes (per-executor semantics)
    multiarch_formats::Vector{BinFmtRegistration}  # binfmt_misc entries needed for emulation
    uid::Cint
    gid::Cint
    tmpfs_size::Union{String, Nothing}             # `nothing` = executor default
    hostname::Union{String, Nothing}
    stdin::AnyRedirectable
    stdout::AnyRedirectable
    stderr::AnyRedirectable
    verbose::Bool

    function SandboxConfig(mounts::Dict{String,MountInfo},
                           env::Dict{String,String} = Dict{String,String}();
                           entrypoint::Union{String,Nothing} = nothing,
                           pwd::String = "/",
                           persist::Bool = true,
                           multiarch::Vector{<:Platform} = Platform[],
                           uid::Integer=0,
                           gid::Integer=0,
                           tmpfs_size::Union{String, Nothing}=nothing,
                           hostname::Union{String, Nothing}=nothing,
                           stdin::AnyRedirectable = Base.devnull,
                           stdout::AnyRedirectable = Base.stdout,
                           stderr::AnyRedirectable = Base.stderr,
                           verbose::Bool = false)
        # Lint the maps to ensure that all are absolute paths:
        for path in [keys(mounts)..., [v.host_path for v in values(mounts)]...,
                     something(entrypoint, "/"), pwd]
            if !startswith(path, "/")
                throw(ArgumentError("Path mapping $(path) is not absolute!"))
            end
        end

        for (sandbox_path, mount_info) in mounts
            # Force every path to be `realpath()`'ed (up to the point of existence)
            # This allows us to point to as-of-yet nonexistant files, but to collapse
            # as many symlinks as possible.
            # NOTE: this mutates the caller-supplied `mounts` dictionary in place.
            mount_info = MountInfo(realpath_stem(mount_info.host_path), mount_info.type)
            mounts[sandbox_path] = mount_info

            # Disallow ecryptfs mount points, they don't play well with user namespaces.
            crypt, mountpoint = is_ecryptfs(mount_info.host_path; verbose)
            if crypt
                throw(ArgumentError("Path $(mount_info.host_path) is mounted on the ecryptfs filesystem $(mountpoint)!"))
            end
        end

        # Ensure that read_only_maps contains a mapping for the root in the guest:
        if !haskey(mounts, "/") || mounts["/"].type != MountType.Overlayed
            throw(ArgumentError("Must provide an overlayed root mapping!"))
        end

        # Collect all multiarch platforms, mapping to the known interpreter for that platform.
        multiarch_formats = Set{BinFmtRegistration}()
        interp_platforms = collect(keys(platform_qemu_registrations))
        for platform in multiarch
            # If this platform is natively runnable, skip it
            if natively_runnable(platform)
                continue
            end
            platform_idx = findfirst(p -> platforms_match(platform, p), interp_platforms)
            if platform_idx === nothing
                throw(ArgumentError("Platform $(triplet(platform)) unsupported for multiarch!"))
            end
            push!(multiarch_formats, platform_qemu_registrations[interp_platforms[platform_idx]])
        end

        return new(mounts, env, entrypoint, pwd, persist, collect(multiarch_formats), Cint(uid), Cint(gid), tmpfs_size, hostname, stdin, stdout, stderr, verbose)
    end
end
# Compatibility shim for the legacy `read_only_maps`/`read_write_maps` API:
function SandboxConfig(read_only_maps::Dict{String,String},
                       read_write_maps::Dict{String,String} = Dict{String,String}(),
                       env::Dict{String,String} = Dict{String,String}();
                       kwargs...)
    # Translate the two legacy dictionaries into the unified `mounts` form.
    mounts = Dict{String,MountInfo}()
    for (guest_path, host_path) in read_only_maps
        # The legacy API always overlayed the rootfs; everything else was read-only.
        guest_mt = guest_path == "/" ? MountType.Overlayed : MountType.ReadOnly
        mounts[guest_path] = MountInfo(host_path, guest_mt)
    end
    for (guest_path, host_path) in read_write_maps
        if haskey(mounts, guest_path)
            throw(ArgumentError("Cannot specify the same sandbox path twice in maps! ('$(guest_path)')"))
        end
        mounts[guest_path] = MountInfo(host_path, MountType.ReadWrite)
    end
    return SandboxConfig(mounts, env; kwargs...)
end
| Sandbox | https://github.com/staticfloat/Sandbox.jl.git |
|
[
"MIT"
] | 2.0.2 | 0f7b398a74a9fd40e7286f4ae352ba959fe52151 | code | 8883 | using UserNSSandbox_jll
# Use User Namespaces to provide isolation on Linux hosts whose kernels support it
export UserNamespacesExecutor, UnprivilegedUserNamespacesExecutor, PrivilegedUserNamespacesExecutor

# Common supertype for the privileged and unprivileged UserNS executors below.
abstract type UserNamespacesExecutor <: SandboxExecutor; end
# Recursively `chmod()` a tree, swallowing permission-style errors along the
# way; optionally escalates each chmod through `sudo`.
function chmod_recursive(root::String, perms, use_sudo::Bool)
    entries = String[]
    try
        entries = readdir(root)
    catch err
        # Unreadable directories are simply skipped; anything else is real.
        isa(err, Base.IOError) || rethrow(err)
    end
    for entry in entries
        child = joinpath(root, entry)
        try
            if use_sudo
                run(`$(sudo_cmd()) chmod $(string(perms, base=8)) $(child)`)
            else
                chmod(child, perms)
            end
        catch err
            isa(err, Base.IOError) || rethrow(err)
        end
        # Recurse into real subdirectories only; never follow symlinks.
        if isdir(child) && !islink(child)
            chmod_recursive(child, perms, use_sudo)
        end
    end
end
"""
    cleanup(exe::UserNamespacesExecutor)

Delete the executor's persistence directory, if one was ever created.  The
sandboxed process may have left files our user cannot read, so everything is
first `chmod`'ed permissive (via `sudo` for the privileged executor), then
the tree is deleted best-effort.
"""
function cleanup(exe::UserNamespacesExecutor)
    if exe.persistence_dir !== nothing && isdir(exe.persistence_dir)
        # Because a lot of these files are unreadable, we must `chmod +r` them before deleting
        chmod_recursive(exe.persistence_dir, 0o777, isa(exe, PrivilegedUserNamespacesExecutor))
        try
            rm(exe.persistence_dir; force=true, recursive=true)
        catch
            # Best-effort: leftover temp directories are tolerable.
        end
    end
end
# Because we can run in "privileged" or "unprivileged" mode, let's treat
# these as two separate, but very similar, executors.
mutable struct UnprivilegedUserNamespacesExecutor <: UserNamespacesExecutor
    # Host directory holding persistent overlay state (created lazily by
    # `build_executor_command`), or `nothing` if none has been created yet.
    persistence_dir::Union{String,Nothing}
    # Whether the overlay mount needs the `userxattr` option (probed lazily).
    userxattr::Bool

    UnprivilegedUserNamespacesExecutor() = new(nothing, false)
end

mutable struct PrivilegedUserNamespacesExecutor <: UserNamespacesExecutor
    # Same fields/semantics as the unprivileged executor above.
    persistence_dir::Union{String,Nothing}
    userxattr::Bool

    PrivilegedUserNamespacesExecutor() = new(nothing, false)
end

# Human-readable names for logging and REPL display.
Base.show(io::IO, exe::UnprivilegedUserNamespacesExecutor) = write(io, "Unprivileged User Namespaces Executor")
Base.show(io::IO, exe::PrivilegedUserNamespacesExecutor) = write(io, "Privileged User Namespaces Executor")
"""
    executor_available(::Type{T}; verbose::Bool=false) where {T <: UserNamespacesExecutor}

Return `true` if the `sandbox` helper binary is available on this platform
and a full probe succeeds: kernel version check, `overlay` module check, and
a `probe_executor()` run inside a temporary executor.
"""
function executor_available(::Type{T}; verbose::Bool=false) where {T <: UserNamespacesExecutor}
    # If we're on a platform that doesn't even have `sandbox` available, return false
    if !UserNSSandbox_jll.is_available()
        return false
    end
    return with_executor(T) do exe
        return check_kernel_version(;verbose) &&
               check_overlayfs_loaded(;verbose) &&
               probe_executor(exe; verbose)
    end
end
"""
    check_kernel_version(;verbose::Bool = false)

Return `true` if we are on Linux and the running kernel is new enough
(>= 3.18) to support the namespace features the sandbox needs.
"""
function check_kernel_version(;verbose::Bool = false)
    # User namespaces are a Linux-only feature.
    Sys.islinux() || return false

    kver = get_kernel_version()

    # If we were unable to parse any part of the version number, warn and exit.
    if kver === nothing
        @warn("Unable to check version number")
        return false
    end

    # A kernel that predates 3.18 cannot support us.
    if kver < v"3.18"
        @warn("Kernel version too old: detected $(kver), need at least 3.18!")
        return false
    end

    verbose && @info("Parsed kernel version \"$(kver)\"")
    return true
end
"""
    check_overlayfs_loaded(;verbose::Bool = false)

Return `true` if the `overlay` kernel module appears in the loaded-module
list, or if the check is explicitly disabled via the
`SANDBOX_SKIP_OVERLAYFS_CHECK` environment variable.  Always `false` off
Linux.
"""
function check_overlayfs_loaded(;verbose::Bool = false)
    if !Sys.islinux()
        return false
    end

    # If the user has disabled this check, return `true`
    if parse(Bool, get(ENV, "SANDBOX_SKIP_OVERLAYFS_CHECK", "false"))
        return true
    end

    mods = get_loaded_modules()
    if verbose
        @info("Found $(length(mods)) loaded modules")
    end

    # Keep only the `overlay` module entry, if present.
    filter!(mods) do (name, size, count, deps, state, addr)
        return name == "overlay"
    end

    if isempty(mods)
        @warn("""
        The `overlay` kernel module is needed for the UserNS executors, but could not find it loaded.
        Try `sudo modprobe overlay` or export SANDBOX_SKIP_OVERLAYFS_CHECK=true to disable this check!
        """)
        return false
    end

    if verbose
        @info("Found loaded `overlay` module")
    end
    return true
end
"""
    build_executor_command(exe::UserNamespacesExecutor, config::SandboxConfig, user_cmd::Cmd)

Translate `config` plus the user's `user_cmd` into the full host-side `Cmd`
that invokes the `sandbox` wrapper binary (prefixed with `sudo`/`su` for the
privileged executor).  Has two side effects: it may lazily create
`exe.persistence_dir`, and it registers any `binfmt_misc` interpreters that
`config.multiarch_formats` requires.
"""
function build_executor_command(exe::UserNamespacesExecutor, config::SandboxConfig, user_cmd::Cmd)
    # While we would usually prefer to use the `executable_product()` function to get a
    # `Cmd` object that has all of the `PATH` and `LD_LIBRARY_PATH` environment variables
    # set properly so that the executable product can be run, we are careful to ensure
    # that `sandbox` has no dependencies (as much as that is possible).
    cmd_string = String[UserNSSandbox_jll.sandbox_path]

    # Enable verbose mode on the sandbox wrapper itself
    if config.verbose
        push!(cmd_string, "--verbose")
    end

    # Extract the rootfs, as it's treated specially
    append!(cmd_string, ["--rootfs", config.mounts["/"].host_path])

    # Add our `--cd` command
    append!(cmd_string, ["--cd", config.pwd])

    # Add in all other mappings (skipping the rootfs), encoding the mount type
    # as a `:ro`/`:rw`/`:ov` suffix understood by the wrapper binary.
    for (sandbox_path, mount_info) in config.mounts
        if sandbox_path == "/"
            continue
        end
        local mount_type_str
        if mount_info.type == MountType.ReadOnly
            mount_type_str = ":ro"
        elseif mount_info.type == MountType.ReadWrite
            mount_type_str = ":rw"
        elseif mount_info.type == MountType.Overlayed
            mount_type_str = ":ov"
        else
            throw(ArgumentError("Unknown mount type: $(mount_info.type)"))
        end
        append!(cmd_string, ["--mount", "$(mount_info.host_path):$(sandbox_path)$(mount_type_str)"])
    end

    # Add in entrypoint, if it is set
    if config.entrypoint !== nothing
        append!(cmd_string, ["--entrypoint", config.entrypoint])
    end

    # If we have a `--persist` argument, check to see if we already have a persistence_dir
    # setup, if we do not, create a temporary directory and set it into our executor
    if config.persist
        if exe.persistence_dir === nothing
            # Search for a functional persistence directory
            persist_root, userxattr = find_persist_dir_root(config.mounts["/"].host_path; verbose=config.verbose)
            if persist_root === nothing
                throw(ArgumentError("Unable to find a persistence directory root that works!"))
            end
            exe.persistence_dir = mktempdir(persist_root)
            exe.userxattr = userxattr
        end
        append!(cmd_string, ["--persist", exe.persistence_dir])
        if exe.userxattr
            push!(cmd_string, "--userxattr")
        end
    end

    # For each platform requested by `multiarch`, ensure its matching interpreter is registered.
    register_requested_formats!(config.multiarch_formats; verbose=config.verbose)

    # Set the user and group, if requested
    append!(cmd_string, ["--uid", string(config.uid), "--gid", string(config.gid)])

    # Set the custom tmpfs_size, if requested
    if config.tmpfs_size !== nothing
        append!(cmd_string, ["--tmpfs-size", config.tmpfs_size])
    end

    if config.hostname !== nothing
        append!(cmd_string, ["--hostname", config.hostname])
    end

    # If we're running in privileged mode, we need to add `sudo` (or `su`, if `sudo` doesn't exist)
    if isa(exe, PrivilegedUserNamespacesExecutor)
        # Next, prefer `sudo`, but allow fallback to `su`. Also, force-set our
        # environmental mappings with sudo, because many of these are often lost
        # and forgotten due to `sudo` restrictions on setting `LD_LIBRARY_PATH`, etc...
        if get(sudo_cmd(), 1, "") == "sudo"
            sudo_envs = vcat([["-E", "$k=$(config.env[k])"] for k in keys(config.env)]...)
            if user_cmd.env !== nothing
                append!(sudo_envs, vcat([["-E", pair] for pair in user_cmd.env]...))
            end
            prepend!(cmd_string, String[sudo_cmd()..., sudo_envs...])
        else
            prepend!(cmd_string, sudo_cmd())
        end
    end

    # Finally, append the user-requested command string
    push!(cmd_string, "--")
    append!(cmd_string, user_cmd.exec)

    # Construct a `Cmd` object off of those, with the SandboxConfig's env (if this is an unprivileged runner):
    sandbox_cmd = setenv(Cmd(cmd_string), String[])
    if isa(exe, UnprivilegedUserNamespacesExecutor)
        sandbox_cmd = setenv(sandbox_cmd, config.env)

        # If the user has provided an environment with their command, merge that in as well
        if user_cmd.env !== nothing
            sandbox_cmd = addenv(sandbox_cmd, user_cmd.env)
        end
    end

    # If the user has asked that this command be allowed to fail silently, pass that on
    if user_cmd.ignorestatus
        sandbox_cmd = ignorestatus(sandbox_cmd)
    end

    return sandbox_cmd
end
| Sandbox | https://github.com/staticfloat/Sandbox.jl.git |
|
[
"MIT"
] | 2.0.2 | 0f7b398a74a9fd40e7286f4ae352ba959fe52151 | code | 11715 | using Base.BinaryPlatforms
"""
    check_binfmt_misc_loaded()

Return `true` if the kernel's `binfmt_misc` support is present, mounted at
`/proc/sys/fs/binfmt_misc`, and currently enabled.
"""
function check_binfmt_misc_loaded()
    # `binfmt_misc` is a Linux kernel feature; no other OS qualifies.
    Sys.islinux() || return false

    # No directory => the kernel module is likely not installed at all.
    isdir("/proc/sys/fs/binfmt_misc") || return false

    # No `status` file => the module may be unloaded, or the special
    # `binfmt_misc` filesystem may not be mounted.
    isfile("/proc/sys/fs/binfmt_misc/status") || return false

    # The module can exist yet be administratively disabled.
    return strip(String(read("/proc/sys/fs/binfmt_misc/status"))) == "enabled"
end
"""
    BinFmtRegistration

Provides a structured view of a `binfmt_misc` interpreter registration. Note that only "magic"
matching rules are allowed, we do not support "extension" matching rules.
"""
struct BinFmtRegistration
    name::String
    interpreter::String
    flags::Vector{Symbol}
    offset::Int64
    magic::Vector{UInt8}
    mask::Vector{UInt8}

    function BinFmtRegistration(name::AbstractString,
                                interpreter::AbstractString,
                                flags::Union{AbstractString,Vector{Symbol}},
                                offset::Integer,
                                magic::Vector{UInt8},
                                mask::Union{Nothing,Vector{UInt8}} = nothing)
        # A missing mask means "match every magic byte exactly".
        byte_mask = mask === nothing ? fill(0xff, length(magic)) : mask
        # Flags may arrive as a compact string ("OFC") or as a symbol list;
        # they are stored sorted so comparisons are order-independent.
        flag_syms = isa(flags, AbstractString) ? Symbol.(collect(flags)) : flags
        return new(String(name), String(interpreter), sort(flag_syms), Int64(offset), magic, byte_mask)
    end
end

"""
    register_string(reg::BinFmtRegistration)

Constructs the string used to register a `binfmt_misc` registration with the `register`
file endpoint within `/proc/sys/fs/binfmt_misc/register`. To actually register the
interpreter, use `write_binfmt_misc_registration!()`.
"""
function register_string(reg::BinFmtRegistration)
    # `/register` expects each byte double-escaped, e.g. `\\x7f`.
    escape_bytes(bytes) = join((string("\\x", string(b, base=16, pad=2)) for b in bytes), "")
    pieces = [
        "",                              # produces the leading `:` separator
        reg.name,
        "M",                             # we only support `magic` style registrations
        string(reg.offset),
        escape_bytes(reg.magic),
        escape_bytes(reg.mask),
        reg.interpreter,
        join(String.(reg.flags), ""),
    ]
    return join(pieces, ":")
end
# Throw an `ArgumentError` if the named local variable is still `nothing`,
# i.e. the corresponding field never appeared in the parsed registration file.
macro check_specified(name)
    return quote
        if $(esc(name)) === nothing
            throw(ArgumentError($("Error, $(name) must be specified")))
        end
    end
end
"""
    BinFmtRegistration(file::String)

Reads a `binfmt_misc` registration in from disk; if it cannot be parsed (because it is
malformed, or uses unsupported features) an `ArgumentError` will be thrown.  Returns
`nothing` instead of a registration when the on-disk entry is disabled.
"""
function BinFmtRegistration(file::String)
    enabled = false
    interpreter = nothing
    flags = nothing
    offset = nothing
    magic = nothing
    mask = nothing
    for l in strip.(filter(!isempty, split(String(read(file)), "\n")))
        # Handle enabled/disabled line
        if l in ("enabled", "disabled")
            enabled = l == "enabled"
        elseif startswith(l, "interpreter ")
            interpreter = l[13:end]
        elseif startswith(l, "flags:")
            # NOTE(review): index 8 assumes the kernel prints "flags: " with a
            # space after the colon — confirm against /proc output format.
            flags = l[8:end]
        elseif startswith(l, "offset ")
            offset = parse(Int64, l[8:end])
        elseif startswith(l, "magic ")
            magic = hex2bytes(l[7:end])
        elseif startswith(l, "mask ")
            mask = hex2bytes(l[6:end])
        else
            @warn("Unknown `binfmt_misc` configuration directive", line=l)
        end
    end

    # Ensure we are only dealing with properly fully-specified binfmt_misc
    # registrations; a missing field throws `ArgumentError` via the macro.
    @check_specified interpreter
    @check_specified flags
    @check_specified offset
    @check_specified magic

    # If we found a disabled binfmt_misc registration, just ignore it
    if !enabled
        return nothing
    end

    return BinFmtRegistration(basename(file), interpreter, flags, offset, magic, mask)
end
# Two registrations "match" when their masked magic bytes are identical,
# i.e. they would claim the same file headers.
function formats_match(a::BinFmtRegistration, b::BinFmtRegistration)
    # Patterns of different lengths can never collide; without this guard the
    # broadcast below throws a `DimensionMismatch` when comparing against
    # system registrations whose magic length differs from ours.
    length(a.magic) == length(b.magic) || return false
    return (a.magic .& a.mask) == (b.magic .& b.mask)
end
"""
    read_binfmt_misc_registrations()

Return a list of `BinFmtRegistration` objects, one per readable registration, as found
sitting in `/proc/sys/fs/binfmt_misc/*`. Registrations that cannot be parsed are
silently ignored.
"""
function read_binfmt_misc_registrations()
    registrations = BinFmtRegistration[]
    # Bugfix: this early-exit previously returned `String[]`, contradicting the
    # docstring and the `Vector{BinFmtRegistration}` every caller expects.
    if !check_binfmt_misc_loaded()
        return registrations
    end

    for f in readdir("/proc/sys/fs/binfmt_misc"; join=true)
        # Skip "special" files
        if basename(f) ∈ ("register", "status")
            continue
        end

        try
            # `BinFmtRegistration(file)` yields `nothing` for disabled entries.
            reg = BinFmtRegistration(f)
            if reg !== nothing
                push!(registrations, reg)
            end
        catch e
            # Malformed/unsupported entries raise `ArgumentError`; skip them.
            if isa(e, ArgumentError)
                continue
            end
            rethrow(e)
        end
    end
    return registrations
end
# Append (`tee -a`) to a root-owned file by piping our writes through `sudo tee`.
sudo_tee(f::Function, path::String) = open(f, Cmd([sudo_cmd()..., "tee", "-a", path]), write=true)
"""
    write_binfmt_misc_registration!(reg::BinFmtRegistration)

Write a `binfmt_misc` registration out to the kernel's `register` file endpoint.
Requires `sudo` privileges.  Rethrows (after logging) any failure from the
kernel rejecting the registration string.
"""
function write_binfmt_misc_registration!(reg::BinFmtRegistration)
    try
        sudo_tee("/proc/sys/fs/binfmt_misc/register") do io
            write(io, register_string(reg))
        end
    catch e
        @error("Unable to register binfmt_misc format", register_string=register_string(reg))
        rethrow(e)
    end
end
"""
    clear_binfmt_misc_registrations!()

Remove every `binfmt_misc` registration on the system by writing `-1` to the
kernel's `status` endpoint.  Requires `sudo` privileges.
"""
function clear_binfmt_misc_registrations!()
    sudo_tee("/proc/sys/fs/binfmt_misc/status") do io
        write(io, "-1")
    end
    return nothing
end
"""
    register_requested_formats!(formats::Vector{BinFmtRegistration}; verbose::Bool = false)

Given the list of `binfmt_misc` formats, check the currently-registered formats through
`read_binfmt_misc_registrations()`, check to see if any in `formats` are not yet
registered, and if they are not, call `write_binfmt_misc_registration!()` to register
it with an artifact-sourced `qemu-*-static` binary.
"""
function register_requested_formats!(formats::Vector{BinFmtRegistration}; verbose::Bool = false)
    # Do nothing if we're not asking for any formats.
    if isempty(formats)
        return nothing
    end

    # Read in the current binfmt_misc registrations:
    if !check_binfmt_misc_loaded()
        error("Cannot provide multiarch support if `binfmt_misc` not loaded!")
    end
    regs = read_binfmt_misc_registrations()

    # For each format, if there is no pre-existing registration, add it to `formats_to_register`
    formats_to_register = BinFmtRegistration[]
    for reg in formats
        if !any(formats_match.(Ref(reg), regs))
            push!(formats_to_register, BinFmtRegistration(
                reg.name,
                # This artifact contains the necessary QEMU executable (for the current host architecture)
                @artifact_str("$(reg.name)/$(reg.name)-static"),
                reg.flags,
                reg.offset,
                reg.magic,
                reg.mask,
            ))
        end
    end

    # Notify the user if we have any formats to register, then register them.
    if !isempty(formats_to_register)
        format_names = sort([f.name for f in formats_to_register])
        msg = "Registering $(length(formats_to_register)) binfmt_misc entries, this may ask for your `sudo` password."
        if verbose
            @info(msg, formats=format_names)
        elseif (Sys.which("sudo") !== nothing) && (success(`sudo -k -n true`))
            # `sudo -n` succeeded, so no password prompt will appear; stay quiet.
        else
            @info(msg)
        end
        write_binfmt_misc_registration!.(formats_to_register)
    end
    return nothing
end
## binfmt_misc registration templates for various architectures.
## Note that these are true no matter the host architecture; e.g. these
## can just as easily point at `x86_64-qemu-aarch64-static` as `ppc64le-qemu-aarch64-static`.
## In fact, the interpreter path typically gets overwritten in `build_executor_command` anyway.
const qemu_x86_64 = BinFmtRegistration(
"qemu-x86_64",
"/usr/bin/qemu-x86_64-static",
"OFC",
0,
UInt8[0x7f, 0x45, 0x4c, 0x46, 0x02, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x3e, 0x00],
UInt8[0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff],
)
const qemu_i386 = BinFmtRegistration(
"qemu-i386",
"/usr/bin/qemu-i386-static",
"OFC",
0,
UInt8[0x7f, 0x45, 0x4c, 0x46, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x03, 0x00],
UInt8[0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff],
)
const qemu_aarch64 = BinFmtRegistration(
"qemu-aarch64",
"/usr/bin/qemu-aarch64-static",
"OFC",
0,
UInt8[0x7f, 0x45, 0x4c, 0x46, 0x02, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0xb7, 0x00],
UInt8[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff],
)
const qemu_arm = BinFmtRegistration(
"qemu-arm",
"/usr/bin/qemu-arm-static",
"OFC",
0,
UInt8[0x7f, 0x45, 0x4c, 0x46, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x28, 0x00],
UInt8[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff],
)
const qemu_ppc64le = BinFmtRegistration(
"qemu-ppc64le",
"/usr/bin/qemu-ppc64le-static",
"OFC",
0,
UInt8[0x7f, 0x45, 0x4c, 0x46, 0x02, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x15, 0x00],
UInt8[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x00],
)
const platform_qemu_registrations = Dict(
# We register these `qemu-*-static` executables as capable of interpreting both `glibc` and `musl` platforms:
Platform("x86_64", "linux"; libc="glibc") => qemu_x86_64,
Platform("x86_64", "linux"; libc="musl") => qemu_x86_64,
Platform("i686", "linux"; libc="glibc") => qemu_i386,
Platform("i686", "linux"; libc="musl") => qemu_i386,
Platform("aarch64", "linux"; libc="glibc") => qemu_aarch64,
Platform("aarch64", "linux"; libc="musl") => qemu_aarch64,
Platform("armv7l", "linux"; libc="glibc") => qemu_arm,
Platform("armv7l", "linux"; libc="musl") => qemu_arm,
Platform("ppc64le", "linux"; libc="glibc") => qemu_ppc64le,
Platform("ppc64le", "linux"; libc="musl") => qemu_ppc64le,
)
# Determine which platforms this host can execute without QEMU emulation.
const host_arch = arch(HostPlatform())

"""
    natively_runnable(p::Platform)

Return `true` if binaries for platform `p` can run on this host without an
interpreter.  An `x86_64` host also runs `i686` binaries; all other hosts are
assumed to run only their own architecture.  (32-bit ARM support on `aarch64`
hosts is deliberately not assumed, since it cannot be reliably detected.)
"""
function natively_runnable(p::Platform)
    target = arch(p)
    host_arch == "x86_64" && return target ∈ ("x86_64", "i686")
    return target == host_arch
end
| Sandbox | https://github.com/staticfloat/Sandbox.jl.git |
|
[
"MIT"
] | 2.0.2 | 0f7b398a74a9fd40e7286f4ae352ba959fe52151 | code | 12190 | """
max_directory_ctime(prefix::String)
Takes the `stat()` of all files in a directory root, keeping the maximum ctime,
recursively. Comparing just this value allows for quick directory change detection.
"""
function max_directory_ctime(prefix::String)
    newest = 0.0
    for (dir, _, filenames) in walkdir(prefix)
        for name in filenames
            # `lstat()` so symlinks are timed themselves, not their targets.
            newest = max(newest, lstat(joinpath(dir, name)).ctime)
        end
    end
    return newest
end
# Parse `/proc/mounts` into `(mountpoint, fstype)` tuples; empty off Linux.
function get_mounts(;verbose::Bool = false)
    # Without `/proc/mounts` (e.g. non-Linux hosts) there is nothing to parse.
    if !isfile("/proc/mounts")
        if verbose
            @info("Couldn't open /proc/mounts, returning...")
        end
        return Tuple{String,SubString{String}}[]
    end
    table = String(read("/proc/mounts"))

    # Columns 2 and 3 of each non-empty line are the mountpoint and fstype.
    rows = [split(line)[2:3] for line in split(table, "\n") if !isempty(line)]

    # Canonicalize mountpoints now so as to dodge symlink difficulties.
    return [(abspath(row[1] * "/"), row[2]) for row in rows]
end
"""
    realpath_stem(path::AbstractString)

Resolve as much of `path` through `realpath()` as actually exists on disk.
If `path` itself exists this is simply `realpath(path)`; otherwise the longest
existing ancestor is resolved and the remaining (not-yet-existing) components
are appended unresolved.  This collapses directory symlinks partway through a
path while still allowing the final leaf to be created later; a symlink *as*
the final leaf cannot be handled, so treat this as best-effort.

Internally, we use this to attempt to discover the actual mountpoint a mapping
is or will be stored on.
"""
function realpath_stem(path::AbstractString)
    ispath(path) && return realpath(path)

    parent, leaf = splitdir(path)
    # `splitdir()` of a filesystem root yields the root again; reaching this
    # point with `parent == path` means no component of the path is real.
    parent == path && throw(ArgumentError("Unable to find any real component of path!"))
    return joinpath(realpath_stem(parent), leaf)
end
"""
    is_ecryptfs(path::AbstractString; verbose::Bool=false)

Checks to see if the given `path` (or any parent directory) is placed upon an
`ecryptfs` mount. This is known not to work on current kernels, see this bug
for more details: https://bugzilla.kernel.org/show_bug.cgi?id=197603

This method returns whether it is encrypted or not, and what mountpoint it used
to make that decision.
"""
function is_ecryptfs(path::AbstractString; verbose::Bool=false)
    # Canonicalize `path` immediately, and if it's a directory, add a "/" so
    # as to be consistent with the rest of this function
    path = abspath(path)
    if isdir(path)
        path = abspath(path * "/")
    end

    if verbose
        @info("Checking to see if $path is encrypted...")
    end

    # Get a listing of the current mounts. If we can't do this, just give up
    mounts = get_mounts()
    if isempty(mounts)
        return false, path
    end

    # Fast-path asking for a mountpoint directly (e.g. not a subdirectory)
    direct_path = [m[1] == path for m in mounts]
    local parent
    if any(direct_path)
        parent = mounts[findfirst(direct_path)]
    else
        # Find the longest prefix mount:
        parent_mounts = [m for m in mounts if startswith(path, m[1])]
        if isempty(parent_mounts)
            # This is weird; this means that we can't find any mountpoints that
            # hold the given path. I've only ever seen this in `chroot`'ed scenarios.
            return false, path
        end
        # The longest matching mountpoint is the most specific (innermost) one.
        parent = parent_mounts[argmax(map(m->length(m[1]), parent_mounts))]
    end

    # Return true if this mountpoint is an ecryptfs mount
    val = parent[2] == "ecryptfs"
    if verbose && val
        @info(" -> $path is encrypted from mountpoint $(parent[1])")
    end
    return val, parent[1]
end
"""
    uname()

On Linux systems, return the strings returned by the `uname()` function in libc,
in struct order (sysname, nodename, release, ...).  Returns an empty list on
non-Linux platforms.
"""
function uname()
    @static if !Sys.islinux()
        return String[]
    end

    # The uname struct can have wildly differing layouts; we take advantage
    # of the fact that it is just a bunch of NULL-terminated strings laid out
    # one after the other, and that it is (as best as I can tell) at maximum
    # around 1.5KB long. We bump up to 2KB to be safe.
    uname_struct = zeros(UInt8, 2048)
    # NOTE(review): the ccall's return code is not checked — TODO confirm
    # failure handling is acceptable here.
    ccall(:uname, Cint, (Ptr{UInt8},), uname_struct)

    # Parse out all the strings embedded within this struct
    strings = String[]
    idx = 1
    while idx < length(uname_struct)
        # Extract string
        new_string = unsafe_string(pointer(uname_struct, idx))
        push!(strings, new_string)
        idx += length(new_string) + 1

        # Skip trailing zeros
        while uname_struct[idx] == 0 && idx < length(uname_struct)
            idx += 1
        end
    end

    return strings
end
"""
    get_kernel_version(;verbose::Bool = false)

Use `uname()` to get the kernel version and parse it out as a `VersionNumber`,
returning `nothing` if parsing fails or this is not `Linux`.
"""
function get_kernel_version(;verbose::Bool = false)
    if !Sys.islinux()
        return nothing
    end

    uname_strings = try
        uname()
    catch e
        if isa(e, InterruptException)
            rethrow(e)
        end
        @warn("Unable to run `uname()` to check version number!")
        return nothing
    end

    # Some distributions tack extra stuff onto the version number. We walk backwards
    # from the end, searching for the longest string that we can extract a VersionNumber
    # out of. We choose a minimum length of 5, as all kernel version numbers will be at
    # least `X.Y.Z`.
    for end_idx in length(uname_strings[3]):-1:5
        try
            return VersionNumber(uname_strings[3][1:end_idx])
        catch e
            if isa(e, InterruptException)
                rethrow(e)
            end
        end
    end

    # We could never parse anything good out of it. :(
    if verbose
        # Bugfix: corrected "Unablet" typo in the warning message.
        @warn("Unable to parse a VersionNumber out of uname output", uname_strings)
    end
    return nothing
end
"""
    get_loaded_modules()

Returns a list of modules currently loaded by the system. On non-Linux platforms,
returns an empty list.  Each entry is the whitespace-split fields of one
`/proc/modules` line, filtered to modules in the `Live` state.
"""
function get_loaded_modules()
    @static if !Sys.islinux()
        return Vector{String}[]
    end
    !isfile("/proc/modules") && return Vector{SubString{String}}[]
    # NOTE(review): the destructuring assumes exactly six fields per
    # /proc/modules line (name size refcount deps state address) — confirm.
    filter!(split.(readlines("/proc/modules"))) do (name, size, count, deps, state, addr)
        state == "Live"
    end
end
"""
    getuid()

Wrapper around libc's `getuid()` function; returns the real user ID of the
calling process (`0` for root).  POSIX-only.
"""
getuid() = ccall(:getuid, Cint, ())

"""
    getgid()

Wrapper around libc's `getgid()` function; returns the real group ID of the
calling process.  POSIX-only.
"""
getgid() = ccall(:getgid, Cint, ())
# Memoized storage for the detected privilege-escalation wrapper.
# NOTE(review): this global is not `const`; a `const Ref` would be
# type-stable — confirm before changing, as it is also read elsewhere.
_sudo_cmd = nothing

"""
    sudo_cmd()

Return the command prefix (as a vector of strings) used to escalate
privileges: empty when already root, `["sudo"]` when `sudo` works, falling
back to `["su", "root", "-c"]`, or empty (with a warning) when neither is
available.  The result is computed once and cached.
"""
function sudo_cmd()
    global _sudo_cmd

    # Use cached value if we've already run this
    if _sudo_cmd !== nothing
        return _sudo_cmd
    end

    if getuid() == 0
        # If we're already root, don't use any kind of sudo program
        _sudo_cmd = String[]
    elseif Sys.which("sudo") !== nothing && success(`sudo -V`)
        # If `sudo` is available, use that
        _sudo_cmd = ["sudo"]
    elseif Sys.which("su") !== nothing && success(`su --version`)
        # Fall back to `su` if all else fails
        _sudo_cmd = ["su", "root", "-c"]
    else
        @warn("No known sudo-like wrappers!")
        _sudo_cmd = String[]
    end
    return _sudo_cmd
end
# Memoization cache for `load_env_pref()`, keyed by environment-variable name.
_env_pref_dict = Dict{AbstractString,Union{AbstractString,Nothing}}()

"""
    load_env_pref(env_var, prefs_name, default)

Many pieces of `Sandbox.jl` functionality can be controlled either through
environment variables or preferences. This utility function makes it easy
to check first the environment, then preferences, finally falling back to
the default. Additionally, it memoizes the result in a caching dictionary.
"""
function load_env_pref(env_var::AbstractString, prefs_name::AbstractString,
                       default::Union{AbstractString,Nothing})
    if !haskey(_env_pref_dict, env_var)
        # Environment wins over preference; preference wins over `default`.
        _env_pref_dict[env_var] = get(ENV, env_var, @load_preference(prefs_name, default))
    end
    return _env_pref_dict[env_var]
end
"""
    default_persist_root_dirs()

Returns the default list of directories that should be attempted to be used as
persistence storage. Influenced by the `SANDBOX_PERSISTENCE_DIR` environment
variable, as well as the `persist_dir` preference. The last place searched by
default is the `persist_dirs` scratch space.
"""
function default_persist_root_dirs()
    # While this function appears to be duplicating much of the logic within
    # `load_env_pref()`, it actually collects all of the given values, rather
    # than choosing just one.
    dirs = String[]

    # When doing nested sandboxing, we pass information via environment variables:
    if haskey(ENV, "SANDBOX_PERSISTENCE_DIR")
        push!(dirs, ENV["SANDBOX_PERSISTENCE_DIR"])
    end

    # If the user has set a persistence dir preference, try that too
    ppd_pref = @load_preference("persist_dir", nothing)
    if ppd_pref !== nothing
        push!(dirs, ppd_pref)
    end

    # Storing in a scratch space (which is within our writable depot) usually works,
    # except when our depot is on a `zfs` or `ecryptfs` mount, for example.
    push!(dirs, @get_scratch!("persist_dirs"))
    return dirs
end
"""
    find_persist_dir_root(rootfs_path::String, dir_hints::Vector{String} = default_persist_root_dirs();
                          verbose::Bool = false)

Search for a host directory that can serve as the root for persistent overlay
storage on top of `rootfs_path`.  Candidates from `dir_hints` are probed first,
then all suitable system mount points.  Returns a tuple `(path, userxattr)`
where `userxattr` indicates whether the overlay probe succeeded with the
`--userxattr` option; returns `(nothing, false)` if no location works.
"""
function find_persist_dir_root(rootfs_path::String, dir_hints::Vector{String} = default_persist_root_dirs(); verbose::Bool = false)
    # Run the bundled `overlay_probe` helper to check whether `mount_path` can
    # host an overlayfs upper/work dir for `rootfs_path`.  Output is discarded
    # unless `verbose` is set.
    function probe_overlay_mount(rootfs_path, mount_path; verbose::Bool = false, userxattr::Bool = false)
        probe_exe = UserNSSandbox_jll.overlay_probe_path
        probe_args = String[]
        if verbose
            push!(probe_args, "--verbose")
        end
        if userxattr
            push!(probe_args, "--userxattr")
        end
        # `ignorestatus` so that a failing probe yields `false` instead of throwing.
        return success(run(pipeline(ignorestatus(
            `$(probe_exe) $(probe_args) $(realpath(rootfs_path)) $(realpath(mount_path))`
        ); stdout = verbose ? stdout : devnull, stderr = verbose ? stderr : devnull)))
    end

    # If one of our `dir_hints` works, use that, as those are typically our first
    # choices; things like a scratchspace, a user-supplied path, etc...
    # `userxattr=true` is preferred, hence it is probed first.
    for mount_path in dir_hints, userxattr in (true, false)
        if probe_overlay_mount(rootfs_path, mount_path; userxattr, verbose)
            return (mount_path, userxattr)
        end
    end

    # Otherwise, walk over the list of mounts, excluding mount types we know won't work
    disallowed_mount_types = Set([
        # ecryptfs doesn't play nicely with sandboxes at all
        "ecryptfs",
        # zfs does not support features (RENAME_WHITEOUT) required for overlay upper dirs
        "zfs",
        # overlays cannot stack, of course
        "overlay",
        # Exclude mount types that are not for storing data:
        "auristorfs",
        "autofs",
        "binfmt_misc",
        "bpf",
        "cgroup2",
        "configfs",
        "debugfs",
        "devpts",
        "devtmpfs",
        "efivarfs",
        "fusectl",
        "hugetlbfs",
        "mqueue",
        "proc",
        "pstore",
        "ramfs",
        "rpc_pipefs",
        "securityfs",
        "sysfs",
        "tracefs",
    ])
    # Keep only the mount path (first element) of each allowed (path, type) pair.
    mounts = first.(filter(((path, type),) -> type ∉ disallowed_mount_types, get_mounts()))

    # Filter each `mount` point on a set of criteria that we like (e.g. the mount point
    # is owned by us (user-specific `tmpdir`, for instance))
    function owned_by_me(path)
        try
            return stat(path).uid == getuid()
        catch e
            # NOTE(review): libuv `IOError` codes are negated errnos on Unix, so
            # `-e.code == EACCES` checks for a permission failure — confirm on
            # non-Linux platforms.  Unreadable mounts are simply treated as not ours.
            if isa(e, Base.IOError) && -e.code ∈ (Base.Libc.EACCES,)
                return false
            end
            rethrow(e)
        end
    end
    # Probe mounts we own before those we don't (`rev=true` puts `true` first).
    sort!(mounts; by = owned_by_me, rev=true)
    for mount_path in mounts, userxattr in (true, false)
        if probe_overlay_mount(rootfs_path, mount_path; userxattr, verbose)
            return (mount_path, userxattr)
        end
    end

    # Not able to find a SINGLE persistent directory location that works!
    return (nothing, false)
end
| Sandbox | https://github.com/staticfloat/Sandbox.jl.git |
|
[
"MIT"
] | 2.0.2 | 0f7b398a74a9fd40e7286f4ae352ba959fe52151 | code | 4622 | using Test, Sandbox, Scratch, Base.BinaryPlatforms
"""
    with_temp_scratch(f::Function)

Run `f()` with the Scratch.jl scratch directory temporarily redirected to a
fresh temporary directory, which is removed once `f` returns.
"""
function with_temp_scratch(f::Function)
    mktempdir() do temp_scratch
        with_scratch_directory(f, temp_scratch)
    end
end
# Exercise the Docker executor end-to-end; skipped entirely when `docker` is unusable.
if executor_available(DockerExecutor)
    @testset "Docker" begin
        uid = Sandbox.getuid()
        gid = Sandbox.getgid()
        with_temp_scratch() do
            # With a temporary scratch directory, let's start by testing load/save timestamps
            @test !isfile(Sandbox.timestamps_path())
            @test isempty(Sandbox.load_timestamps())
            Sandbox.save_timestamp("foo", 1.0)
            @test isfile(Sandbox.timestamps_path())
            timestamps = Sandbox.load_timestamps()
            @test timestamps["foo"] == 1.0

            # Next, let's actually create a docker image out of our debian rootfs image
            mktempdir() do rootfs_path; mktempdir() do data_path
                mounts = Dict(
                    "/" => MountInfo(rootfs_path, MountType.Overlayed),
                    "/data" => MountInfo(data_path, MountType.Overlayed),
                )
                # Only overlayed mounts participate in the image-staleness check.
                overlay_mounts = Dict(p => m.host_path for (p, m) in mounts if m.type == MountType.Overlayed)
                cp(Sandbox.debian_rootfs(), rootfs_path; force=true)
                @test Sandbox.should_build_docker_image(overlay_mounts, uid, gid)
                @test_logs (:info, r"Building docker image") match_mode=:any begin
                    Sandbox.build_docker_image(mounts, uid, gid; verbose=true)
                end

                # Ensure that it doesn't try to build again since the content is unchanged
                @test !Sandbox.should_build_docker_image(overlay_mounts, uid, gid)
                @test_logs begin
                    Sandbox.build_docker_image(mounts, uid, gid; verbose=true)
                end

                # Change the content
                chmod(joinpath(rootfs_path, "bin", "bash"), 0o775)
                @test Sandbox.should_build_docker_image(overlay_mounts, uid, gid)
                @test_logs (:info, r"Building docker image") match_mode=:any begin
                    Sandbox.build_docker_image(mounts, uid, gid; verbose=true)
                end

                # Ensure that it once again doesn't try to build
                @test !Sandbox.should_build_docker_image(overlay_mounts, uid, gid)
                @test_logs begin
                    Sandbox.build_docker_image(mounts, uid, gid; verbose=true)
                end

                # change the content of `/data`:
                touch(joinpath(data_path, "foo"))
                @test Sandbox.should_build_docker_image(overlay_mounts, uid, gid)
                @test_logs (:info, r"Building docker image") match_mode=:any begin
                    Sandbox.build_docker_image(mounts, uid, gid; verbose=true)
                end

                # Ensure that it once again doesn't try to build
                @test !Sandbox.should_build_docker_image(overlay_mounts, uid, gid)
                @test_logs begin
                    Sandbox.build_docker_image(mounts, uid, gid; verbose=true)
                end
            end; end
        end

        # Smoke-test that the Docker executor can run a trivial workload at all.
        @testset "probe_executor" begin
            with_executor(DockerExecutor) do exe
                @test probe_executor(exe)
            end
        end

        @testset "pull_docker_image" begin
            # Map the host architecture onto Docker's platform naming scheme.
            curr_arch = arch(HostPlatform())
            platform = nothing
            if curr_arch == "x86_64"
                platform = "linux/amd64"
            elseif curr_arch == "aarch64"
                platform = "linux/arm64"
            end
            with_temp_scratch() do
                julia_rootfs = Sandbox.pull_docker_image("julia:latest"; force=true, verbose=true, platform)
                # A second pull without `force` must refuse to clobber the existing rootfs.
                @test_logs (:warn, r"Will not overwrite") begin
                    other_julia_rootfs = Sandbox.pull_docker_image("julia:latest"; verbose=true, platform)
                    @test other_julia_rootfs == julia_rootfs
                end
                # Pulling a nonexistent image warns and returns `nothing`.
                @test_logs (:warn, r"Cannot pull") begin
                    @test Sandbox.pull_docker_image("pleasenooneactuallycreateanimagenamedthis"; verbose=true) === nothing
                end
                @test julia_rootfs !== nothing
                @test isdir(julia_rootfs)
                # Ensure it pulls a rootfs that actually contains `julia`
                @test isfile(joinpath(julia_rootfs, "usr", "local", "julia", "bin", "julia"))
            end
        end
    end
else
    @error("Skipping Docker tests, as it does not seem to be available")
end
| Sandbox | https://github.com/staticfloat/Sandbox.jl.git |
|
[
"MIT"
] | 2.0.2 | 0f7b398a74a9fd40e7286f4ae352ba959fe52151 | code | 4710 | using Test, Sandbox, Base.BinaryPlatforms, LazyArtifacts
# Exercise multiarch (qemu/binfmt_misc) support across all available executors.
@testset "multiarch" begin
    # This is a set of multiarch platforms that is _not_ our current platform
    native_arch = arch(HostPlatform())
    sub_arch = native_arch
    if native_arch == "x86_64"
        sub_arch = "i686"
    # Disabled for now
    #elseif native_arch == "aarch64"
    #    sub_arch = "armv7l"
    end
    # Pick an architecture guaranteed to need emulation on this host.
    alien_arch = native_arch ∈ ("x86_64", "i686") ? "aarch64" : "x86_64"

    @testset "argument parsing" begin
        # Test that our `multiarch` kwarg is correctly parsed
        config = SandboxConfig(
            Dict("/" => Sandbox.multiarch_rootfs());
            multiarch = [
                Platform(alien_arch, "linux"; libc="glibc"),
                Platform(alien_arch, "linux"; libc="musl"),
                Platform(native_arch, "linux"; libgfortran_version=v"4"),
                Platform(native_arch, "linux"; libgfortran_version=v"5"),
                Platform(sub_arch, "linux"),
            ],
        )

        # qemu doesn't care about `libc` or `libgfortran_version` or anything like that.
        # Also, native architectures (and sub-architectures such as `i686` for `x86_64`,
        # or `armv7l` for `aarch64`) get ignored, so we end up with only one multiarch
        # format from all that above, which is just `alien_arch`.
        @test length(config.multiarch_formats) == 1
        @test occursin(alien_arch, config.multiarch_formats[1].name)
    end

    # Of our available executors, let's check to see if each can be used to run multiarch workloads
    multiarch_executors = filter(executor_available, Sandbox.all_executors)
    old_binfmt_misc_regs = nothing

    if get(ENV, "SANDBOX_TEST_MULTIARCH", "true") != "true"
        @warn("Refusing to test multiarch because SANDBOX_TEST_MULTIARCH set to $(ENV["SANDBOX_TEST_MULTIARCH"])")
        multiarch_executors = Sandbox.SandboxExecutor[]
    elseif Sys.islinux()
        # On Linux, we need passwordless sudo to be able to register things
        if Sys.which("sudo") !== nothing && !success(`sudo -k -n true`)
            @warn("Refusing to test multiarch on a system without passwordless sudo!")
            multiarch_executors = Sandbox.SandboxExecutor[]
        end

        # Otherwise, let's save the current set of binfmt_misc registrations
        old_binfmt_misc_regs = Sandbox.read_binfmt_misc_registrations()
    end

    for executor in multiarch_executors
        if Sys.islinux()
            # Start by clearing out the binfmt_misc registrations, so that each executor
            # has to set things up from scratch.
            Sandbox.clear_binfmt_misc_registrations!()
        end

        @testset "HelloWorldC_jll" begin
            multiarch = [
                Platform("x86_64", "linux"; libc="glibc"),
                Platform("x86_64", "linux"; libc="musl"),
                Platform("i686", "linux"; libc="glibc"),
                Platform("i686", "linux"; libc="musl"),
                Platform("aarch64", "linux"; libc="glibc"),
                Platform("aarch64", "linux"; libc="musl"),
                Platform("armv7l", "linux"; libc="glibc"),
                Platform("armv7l", "linux"; libc="musl"),
                Platform("powerpc64le", "linux"; libc="glibc"),
                # We don't have this one yet
                #Platform("powerpc64le", "linux"; libc="musl"),
            ]

            stdout = IOBuffer()
            stderr = IOBuffer()
            config = SandboxConfig(
                Dict(
                    "/" => Sandbox.multiarch_rootfs(),
                    "/apps" => LazyArtifacts.ensure_artifact_installed("multiarch-testing", joinpath(dirname(@__DIR__), "Artifacts.toml")),
                );
                multiarch,
                stdout,
                stderr,
            )

            # Ensure that we're going to try and install some of these formats
            @test !isempty(config.multiarch_formats)

            with_executor(executor) do exe
                # Run the per-triplet hello-world binary under emulation.
                for platform in multiarch
                    @testset "$(platform)" begin
                        @test success(exe, config, `/apps/hello_world.$(triplet(platform))`)
                        @test String(take!(stdout)) == "Hello, World!\n";
                        @test isempty(String(take!(stderr)))
                    end
                end
            end
        end
    end

    if old_binfmt_misc_regs !== nothing && !isempty(old_binfmt_misc_regs)
        # Restore old binfmt_misc registrations so that our test suite isn't clobbering things for others
        Sandbox.clear_binfmt_misc_registrations!()
        Sandbox.write_binfmt_misc_registration!.(old_binfmt_misc_regs)
    end
end
| Sandbox | https://github.com/staticfloat/Sandbox.jl.git |
|
[
"MIT"
] | 2.0.2 | 0f7b398a74a9fd40e7286f4ae352ba959fe52151 | code | 7321 | using Test, Sandbox, Scratch, Pkg, Base.BinaryPlatforms
# Strip any prerelease/build metadata from the running Julia version,
# keeping only the `major.minor.patch` triple.
function curr_version()
    v = VERSION
    return VersionNumber(v.major, v.minor, v.patch)
end
"""
    get_nestable_julia(target_arch = arch(HostPlatform()), version = VERSION)

Download (if not already cached) a Julia installation for `target_arch` and
`version` into a scratch space, and return the path to its unpacked directory.
Prerelease versions are fetched from the nightly bucket; releases from the
official release bucket.
"""
function get_nestable_julia(target_arch = arch(HostPlatform()), version=VERSION)
    # Build the download URL for the requested arch/version.
    function get_julia_url()
        # The release buckets use "x64"/"x86" folder names rather than triplet arch names.
        arch_folder = target_arch
        if target_arch == "x86_64"
            arch_folder = "x64"
        elseif target_arch == "i686"
            arch_folder = "x86"
        end

        if !isempty(version.prerelease)
            # Get the latest version in this major.minor series:
            return "https://julialangnightlies-s3.julialang.org/bin/linux/$(target_arch)/$(version.major).$(version.minor)/julia-latest-linux-$(target_arch).tar.gz"
        else
            return "https://julialang-s3.julialang.org/bin/linux/$(arch_folder)/$(version.major).$(version.minor)/julia-$(version)-linux-$(target_arch).tar.gz"
        end
    end

    # Drop prerelease/build metadata so the scratch key and on-disk layout are stable.
    sanitized_version = VersionNumber(version.major, version.minor, version.patch)
    julia_dir = @get_scratch!("julia-$(target_arch)-$(sanitized_version)")
    if !isfile(joinpath(julia_dir, "julia-$(sanitized_version)", "bin", "julia"))
        url = get_julia_url()
        # Wipe any partial/stale cache before unpacking a fresh tarball.
        rm(julia_dir; force=true, recursive=true)
        Pkg.PlatformEngines.download_verify_unpack(url, nothing, julia_dir; ignore_existence=true, verbose=true)
    end
    # The tarball unpacks to a single top-level directory; return it.
    return joinpath(julia_dir, only(readdir(julia_dir)))
end
# Verify that a sandboxed Julia process can itself launch nested sandboxes.
@testset "Nesting Sandbox.jl" begin
    all_executors = Sandbox.all_executors
    rootfs_dir = Sandbox.debian_rootfs()

    for executor in all_executors
        if !executor_available(executor)
            @error("Skipping $(executor) tests, as it does not seem to be available")
            continue
        end

        # Nested sandboxing explicitly does not work with privileged user namespaces,
        # since the whole issue is that once we've dropped privileges the kernel cannot
        # sandbox properly (hence the need to use privileged executor at all).
        if executor <: PrivilegedUserNamespacesExecutor
            continue
        end

        @testset "$(executor) Nesting" begin
            pkgdir = dirname(@__DIR__)

            mktempdir() do dir
                # Directory to hold read-writing from nested sandboxen
                rw_dir = joinpath(dir, "rw")
                mkpath(rw_dir)
                mkpath(joinpath(rw_dir, "home"))

                # Directory to hold sandbox persistence data
                persist_dir = mktempdir(first(Sandbox.find_persist_dir_root(rootfs_dir)))

                ro_mappings = Dict(
                    # Mount in the rootfs
                    "/" => rootfs_dir,
                    # Mount our package in at its own location
                    pkgdir => pkgdir,
                    # Mount our current active project, which may contain a local
                    # preferences file with a custom sandbox path.
                    "/project" => dirname(Base.active_project()),
                    # Mount in a Julia that can run in this sandbox
                    "/usr/local/julia" => get_nestable_julia(),
                )

                # On the off-chance that we're using a custom `sandbox`,
                # make sure it's available at the path that the project will expect
                if UserNSSandbox_jll.is_available()
                    sandbox_path = dirname(Sandbox.UserNSSandbox_jll.sandbox_path)
                    ro_mappings[sandbox_path] = sandbox_path
                end

                # Mount in `/etc/resolv.conf` as a read-only mount if using a UserNS executor, so that we have DNS
                if executor <: UserNamespacesExecutor && isfile("/etc/resolv.conf")
                    resolv_conf = joinpath(rw_dir, "resolv.conf")
                    cp("/etc/resolv.conf", resolv_conf; follow_symlinks=true)
                    ro_mappings["/etc/resolv.conf"] = resolv_conf
                end

                # read-write mappings
                rw_mappings = Dict(
                    # Mount a temporary directory in as writable
                    "/tmp/readwrite" => rw_dir,
                    # Mount a directory to hold our persistent sandbox data
                    "/sandbox_persistence" => persist_dir,
                )

                # Build environment mappings
                env = Dict(
                    "PATH" => "/usr/local/julia/bin:/usr/local/bin:/usr/bin:/bin",
                    "HOME" => "/tmp/readwrite/home",
                    # Because overlayfs nesting with persistence requires mounting an overlayfs with
                    # a non-tmpfs-hosted workdir, and that's illegal on top of another overlayfs, we
                    # need to thread our persistence mappings through to the client. We do so by
                    # bind-mounting `/sandbox_persistence` into the sandbox for future recursive mountings
                    "SANDBOX_PERSISTENCE_DIR" => "/sandbox_persistence",
                )

                # If we're a nested sandbox, pass the forcing through
                if haskey(ENV, "FORCE_SANDBOX_MODE")
                    env["FORCE_SANDBOX_MODE"] = ENV["FORCE_SANDBOX_MODE"]
                end

                config = SandboxConfig(
                    ro_mappings,
                    rw_mappings,
                    env;
                    pwd = pkgdir,
                    uid = Sandbox.getuid(),
                    gid = Sandbox.getgid(),
                )
                # The child script writes marker files from one and two levels of nesting.
                cmd = `/bin/sh -c "julia --color=yes test/nested/nested_child.jl"`
                with_executor(executor) do exe
                    @test success(exe, config, cmd)
                end
                @test isfile(joinpath(rw_dir, "single_nested.txt"))
                @test isfile(joinpath(rw_dir, "double_nested.txt"))
                @test String(read(joinpath(rw_dir, "single_nested.txt"))) == "aperture\n"
                @test String(read(joinpath(rw_dir, "double_nested.txt"))) == "science\n"

                # Docker without extra privileges must NOT be able to nest sandboxes.
                if executor <: DockerExecutor
                    stderr = IOBuffer()
                    config_with_stderr = SandboxConfig(
                        ro_mappings,
                        # Mount a temporary directory in as writable
                        Dict("/tmp/readwrite" => rw_dir),
                        # Add the path to `julia` onto the path
                        Dict(
                            "PATH" => "/usr/local/julia/bin:/usr/local/bin:/usr/bin:/bin",
                            "HOME" => "/tmp/readwrite/home",
                        );
                        pwd = pkgdir,
                        uid = Sandbox.getuid(),
                        gid = Sandbox.getgid(),
                        stderr = stderr,
                        persist = false,
                    )

                    for privileges in [:no_new_privileges, :unprivileged]
                        with_executor(executor; privileges) do exe
                            @test !success(exe, config_with_stderr, cmd)
                            # Ensure that we get the nested sandbox unable to run any nested sandboxing
                            @test_broken occursin("Could not find any available executors", String(take!(stderr)))
                        end
                    end
                end
            end
        end
    end
end
| Sandbox | https://github.com/staticfloat/Sandbox.jl.git |
|
[
"MIT"
] | 2.0.2 | 0f7b398a74a9fd40e7286f4ae352ba959fe52151 | code | 17542 | using Test, Sandbox, SHA, Base.BinaryPlatforms
# Start from every executor Sandbox provides, then prune the ones that cannot
# work in the current environment.
all_executors = Sandbox.all_executors

# Can we run `sudo` without a password? If not, don't attempt to test the privileged runner
if Sys.which("sudo") !== nothing && !success(`sudo -k -n true`)
    all_executors = filter(exe -> exe != PrivilegedUserNamespacesExecutor, all_executors)
end

# When already running as root, the unprivileged userns executor is not meaningful.
if Sandbox.getuid() == 0
    all_executors = filter(exe -> exe != UnprivilegedUserNamespacesExecutor, all_executors)
end
"""
    print_if_nonempty(stderr::Vector{UInt8})

Test helper: return `true` when the captured stderr bytes are empty; otherwise
log an error, print the captured output, and return `false`.
"""
function print_if_nonempty(stderr::Vector{UInt8})
    isempty(stderr) && return true
    @error("not empty")
    println(String(stderr))
    return false
end
# The Debian rootfs artifact is shared by every executor test below.
rootfs_dir = Sandbox.debian_rootfs()

# Run the full sandboxing test matrix once per available executor.
for executor in all_executors
    if !executor_available(executor)
        @error("Skipping $(executor) tests, as it does not seem to be available")
        continue
    end

    @testset "$(executor) Sandboxing" begin
        @testset "capturing stdout/stderr" begin
            stdout = IOBuffer()
            stderr = IOBuffer()
            config = SandboxConfig(
                Dict("/" => rootfs_dir);
                stdout,
                stderr,
            )
            with_executor(executor) do exe
                @test success(exe, config, `/bin/sh -c "echo stdout; echo stderr >&2"`)
                @test String(take!(stdout)) == "stdout\n"
                @test String(take!(stderr)) == "stderr\n"
            end
        end

        @testset "ignorestatus()" begin
            config = SandboxConfig(Dict("/" => rootfs_dir))
            with_executor(executor) do exe
                # A failing command throws unless wrapped in `ignorestatus`.
                @test_throws ProcessFailedException run(exe, config, `/bin/sh -c "false"`)
                @test !success(exe, config, ignorestatus(`/bin/sh -c "false"`))
            end
        end

        @testset "environment passing" begin
            # Ensure all those pesky "special" variables make it through
            env = Dict(
                "PATH" => "for",
                "LD_LIBRARY_PATH" => "science",
                "DYLD_LIBRARY_PATH" => "you",
                "SHELL" => "monster",
            )
            stdout = IOBuffer()
            config = SandboxConfig(
                Dict("/" => rootfs_dir),
                Dict{String,String}(),
                env;
                stdout,
            )
            user_cmd = `/bin/sh -c "echo \$PATH \$LD_LIBRARY_PATH \$DYLD_LIBRARY_PATH \$SHELL"`
            with_executor(executor) do exe
                @test success(exe, config, user_cmd)
                @test String(take!(stdout)) == "for science you monster\n";
            end

            # Test that setting some environment onto `user_cmd` can override the `config` env:
            user_cmd = setenv(user_cmd, "DYLD_LIBRARY_PATH" => "my", "SHELL" => "friend")
            with_executor(executor) do exe
                @test success(exe, config, user_cmd)
                @test String(take!(stdout)) == "for science my friend\n";
            end
        end

        @testset "reading from mounts" begin
            mktempdir() do dir
                open(joinpath(dir, "note.txt"), write=true) do io
                    write(io, "great success")
                end
                stdout = IOBuffer()
                config = SandboxConfig(
                    Dict("/" => rootfs_dir, "/glados" => dir);
                    stdout,
                )
                with_executor(executor) do exe
                    @test success(exe, config, `/bin/sh -c "cat /glados/note.txt"`)
                    @test String(take!(stdout)) == "great success";
                end
            end
        end

        @testset "writing to mounts" begin
            mktempdir() do dir
                stdout = IOBuffer() # NOTE(review): unused — the config below does not capture stdout
                config = SandboxConfig(
                    Dict(
                        "/" => MountInfo(rootfs_dir, MountType.Overlayed),
                        "/glados" => MountInfo(dir, MountType.ReadWrite),
                    );
                )
                with_executor(executor) do exe
                    # Writes to a ReadWrite mount must land on the host filesystem.
                    @test success(exe, config, `/bin/sh -c "echo aperture > /glados/science.txt"`)
                    @test isfile(joinpath(dir, "science.txt"))
                    @test String(read(joinpath(dir, "science.txt"))) == "aperture\n"
                end
            end
        end

        @testset "pipelining" begin
            # Feed the stdout of one sandboxed command into the stdin of another.
            pipe = PipeBuffer()
            stdout = IOBuffer()
            first_config = SandboxConfig(
                Dict("/" => rootfs_dir),
                stdout = pipe,
            )
            second_config = SandboxConfig(
                Dict("/" => rootfs_dir),
                stdin = pipe,
                stdout = stdout,
            )
            with_executor(executor) do exe
                @test success(exe, first_config, `/bin/sh -c "echo 'ignore me'; echo 'pick this up foo'; echo 'ignore me as well'"`)
                @test success(exe, second_config, `/bin/sh -c "grep foo"`)
                @test String(take!(stdout)) == "pick this up foo\n";
            end
        end

        # While we don't strictly care about read-only mounts, we might in the future,
        # so we ensure they're supported. What we _truly_ care about is Overlayed,
        # mounts, where the modifications are visible only inside the sandbox, and are
        # saved within the persistence directory
        @testset "ReadOnly, ReadWrite and Overlayed MountTypes" begin
            mktempdir() do dir
                stdout = IOBuffer()
                stderr = IOBuffer()
                mkpath(joinpath(dir, "read_only"))
                mkpath(joinpath(dir, "read_write"))
                mkpath(joinpath(dir, "overlayed"))
                config = SandboxConfig(
                    Dict(
                        "/" => MountInfo(rootfs_dir, MountType.Overlayed),
                        "/read_only" => MountInfo(joinpath(dir, "read_only"), MountType.ReadOnly),
                        "/read_write" => MountInfo(joinpath(dir, "read_write"), MountType.ReadWrite),
                        "/overlayed" => MountInfo(joinpath(dir, "overlayed"), MountType.Overlayed),
                    );
                    stdout,
                    stderr,
                    persist=false,
                    #verbose=executor == DockerExecutor,
                )

                # Modifying the rootfs works, and is temporary; for docker containers this is modifying
                # the rootfs image, for userns this is all mounted within an overlay backed by a tmpfs,
                # because we have `persist` set to `false`. Modifying `/read_only` does not work,
                # Modifying `/read_write` works and is visible to the host, modifying `/overlayed` works
                # but is not visible to the host.
                with_executor(executor) do exe
                    # Because `persist=false`, this is non-persistent.
                    @test success(exe, config, `/bin/sh -c "echo aperture >> /bin/science && cat /bin/science"`)
                    @test String(take!(stdout)) == "aperture\n";
                    @test print_if_nonempty(take!(stderr))
                    @test success(exe, config, `/bin/sh -c "echo aperture >> /bin/science && cat /bin/science"`)
                    @test String(take!(stdout)) == "aperture\n";
                    @test print_if_nonempty(take!(stderr))

                    # An actual read-only mount will not allow writing, because it's truly read-only
                    @test !success(exe, config, ignorestatus(`/bin/sh -c "echo aperture >> /read_only/science && cat /read_only/science"`))
                    @test occursin("Read-only file system", String(take!(stderr)))
                    @test !isfile(joinpath(dir, "read_only", "science"))

                    # A read-write mount, on the other hand, will be permanent, and visible to the host
                    @test success(exe, config, `/bin/sh -c "echo aperture >> /read_write/science && cat /read_write/science"`)
                    @test String(take!(stdout)) == "aperture\n";
                    @test print_if_nonempty(take!(stderr))
                    @test isfile(joinpath(dir, "read_write", "science"))
                    @test success(exe, config, `/bin/sh -c "echo aperture >> /read_write/science && cat /read_write/science"`)
                    @test String(take!(stdout)) == "aperture\naperture\n";
                    @test print_if_nonempty(take!(stderr))
                    @test isfile(joinpath(dir, "read_write", "science"))

                    # An overlay mount allows writing and reading, but does not modify the host environment.
                    # Because this is a non-persistent executor, changes are lost from invocation to invocation.
                    @test success(exe, config, `/bin/sh -c "echo aperture >> /overlayed/science && cat /overlayed/science"`)
                    @test String(take!(stdout)) == "aperture\n";
                    @test print_if_nonempty(take!(stderr))
                    @test success(exe, config, `/bin/sh -c "echo aperture >> /overlayed/science && cat /overlayed/science"`)
                    @test String(take!(stdout)) == "aperture\n";
                    @test print_if_nonempty(take!(stderr))
                    @test !isfile(joinpath(dir, "overlayed", "science"))
                end
            end
        end

        @testset "entrypoint" begin
            mktempdir() do dir
                read_only_dir = joinpath(dir, "read_only")
                mkdir(read_only_dir)
                stdout = IOBuffer()
                stderr = IOBuffer()
                config = SandboxConfig(
                    Dict("/" => rootfs_dir, "/read_only" => read_only_dir),
                    entrypoint = "/read_only/entrypoint",
                    stdout = stdout,
                    stderr = stderr,
                    persist = false,
                )

                # Generate an `entrypoint` script that mounts a tmpfs-backed overlayfs over our read-only mounts
                # Allowing us to write to those read-only mounts, but the changes are temporary
                open(joinpath(read_only_dir, "entrypoint"), write=true) do io
                    write(io, """
                    #!/bin/sh
                    echo entrypoint activated
                    mkdir /overlay_workdir
                    mount -t tmpfs -osize=1G tmpfs /overlay_workdir
                    mkdir -p /overlay_workdir/upper
                    mkdir -p /overlay_workdir/work
                    mount -t overlay overlay -olowerdir=/read_only -oupperdir=/overlay_workdir/upper -oworkdir=/overlay_workdir/work /read_only
                    exec "\$@"
                    """)
                end
                chmod(joinpath(read_only_dir, "entrypoint"), 0o755)

                # Modifying the read-only files now works, and is temporary
                with_executor(executor) do exe
                    @test success(exe, config, `/bin/sh -c "echo aperture >> /read_only/science && cat /read_only/science"`)
                    @test String(take!(stdout)) == "entrypoint activated\naperture\n";
                    @test print_if_nonempty(take!(stderr))
                    @test success(exe, config, `/bin/sh -c "echo aperture >> /read_only/science && cat /read_only/science"`)
                    @test String(take!(stdout)) == "entrypoint activated\naperture\n";
                    @test print_if_nonempty(take!(stderr))
                end
            end
        end

        @testset "persistence" begin
            mktempdir() do dir
                stdout = IOBuffer()
                stderr = IOBuffer()
                config = SandboxConfig(
                    Dict(
                        "/" => MountInfo(rootfs_dir, MountType.Overlayed),
                        "/overlayed" => MountInfo(dir, MountType.Overlayed),
                    ),
                    stdout = stdout,
                    stderr = stderr,
                    persist = true,
                )

                # Modifying the rootfs or the overlay mount is persistent within a single executor
                for prefix in ("/bin", "/overlayed")
                    cmd = `/bin/sh -c "echo aperture >> $prefix/science && cat $prefix/science"`
                    with_executor(executor) do exe
                        # Two runs within the same executor see each other's writes...
                        @test success(exe, config, cmd)
                        @test String(take!(stdout)) == "aperture\n";
                        @test print_if_nonempty(take!(stderr))
                        @test success(exe, config, cmd)
                        @test String(take!(stdout)) == "aperture\naperture\n";
                        @test print_if_nonempty(take!(stderr))
                        @test !isfile(joinpath(dir, "science"))
                    end

                    # ...but a fresh executor starts from a clean slate.
                    with_executor(executor) do exe
                        @test success(exe, config, cmd)
                        @test String(take!(stdout)) == "aperture\n";
                        @test print_if_nonempty(take!(stderr))
                        @test !isfile(joinpath(dir, "science"))
                    end
                end
            end
        end

        @testset "explicit user and group" begin
            for (uid,gid) in [(0,0), (999,0), (0,999), (999,999)]
                stdout = IOBuffer()
                config = SandboxConfig(
                    Dict("/" => rootfs_dir);
                    stdout, uid, gid
                )
                with_executor(executor) do exe
                    @test success(exe, config, `/usr/bin/id`)
                    str = String(take!(stdout))
                    @test contains(str, "uid=$(uid)")
                    @test contains(str, "gid=$(gid)")
                end
            end
        end

        # If we have the docker executor available (necessary to do the initial pull),
        # let's test launching off of a docker image. Only run this on x86_64 because
        # docker doesn't (yet) have images with this name for other architectures.
        if executor_available(DockerExecutor) && arch(HostPlatform()) == "x86_64"
            julia_rootfs = Sandbox.pull_docker_image("julia:alpine")
            @testset "launch from docker image" begin
                stdout = IOBuffer()
                stderr = IOBuffer()
                config = SandboxConfig(
                    Dict("/" => julia_rootfs),
                    Dict{String,String}(),
                    # Add the path to `julia` onto the path, then use `sh` to process the PATH
                    Dict("PATH" => "/usr/local/julia/bin:/usr/local/bin:/usr/bin:/bin");
                    stdout = stdout,
                    stderr = stderr,
                )
                with_executor(executor) do exe
                    @test success(exe, config, `/bin/sh -c "julia -e 'println(\"Hello, Julia!\")'"`)
                    @test String(take!(stdout)) == "Hello, Julia!\n";
                    @test print_if_nonempty(take!(stderr))
                end
            end
        end

        @testset "hostname" begin
            stdout = IOBuffer()
            config = SandboxConfig(
                Dict("/" => rootfs_dir);
                stdout,
                hostname="sandy",
            )
            with_executor(executor) do exe
                @test success(exe, config, `/bin/uname -n`)
                @test chomp(String(take!(stdout))) == "sandy"
            end
        end

        @testset "Internet access" begin
            mktempdir() do rw_dir
                ro_mappings = Dict(
                    "/" => rootfs_dir,
                )

                # Mount in `/etc/resolv.conf` as a read-only mount if using a UserNS executor, so that we have DNS
                if executor <: UserNamespacesExecutor && isfile("/etc/resolv.conf")
                    resolv_conf = joinpath(rw_dir, "resolv.conf")
                    cp("/etc/resolv.conf", resolv_conf; follow_symlinks=true)
                    ro_mappings["/etc/resolv.conf"] = resolv_conf
                end

                # Do a test with the debian rootfs where we try to use `apt` to install `curl`, then use that to download something.
                socrates_url = "https://github.com/staticfloat/small_bin/raw/master/socrates.tar.xz"
                socrates_hash = "61bcf109fcb749ee7b6a570a6057602c08c836b6f81091eab7aa5f5870ec6475"
                config = SandboxConfig(
                    ro_mappings,
                    Dict("/tmp/rw_dir" => rw_dir),
                    Dict("HOME" => "/root");
                    verbose = true,
                )
                with_executor(executor) do exe
                    @test success(exe, config, `/bin/sh -c "apt update && apt install -y curl && curl -L $(socrates_url) -o /tmp/rw_dir/$(basename(socrates_url))"`)
                end
                # Verify the download made it out to the host, byte-for-byte.
                socrates_path = joinpath(rw_dir, basename(socrates_url))
                @test isfile(socrates_path)
                @test open(io -> bytes2hex(sha256(io)), socrates_path) == socrates_hash
            end
        end
    end
end
# Sanity-check that `with_executor()` with no arguments selects a working default.
@testset "default executor" begin
    stdout = IOBuffer()
    stderr = IOBuffer()
    config = SandboxConfig(
        Dict("/" => rootfs_dir);
        stdout,
        stderr,
    )
    with_executor() do exe
        @test success(exe, config, `/bin/sh -c "echo stdout; echo stderr >&2"`)
        @test String(take!(stdout)) == "stdout\n";
        @test String(take!(stderr)) == "stderr\n";
    end
end
| Sandbox | https://github.com/staticfloat/Sandbox.jl.git |
|
[
"MIT"
] | 2.0.2 | 0f7b398a74a9fd40e7286f4ae352ba959fe52151 | code | 4267 | using Test, LazyArtifacts, Sandbox
# Unit tests for `SandboxConfig` construction, defaults, validation, and helpers.
@testset "SandboxConfig" begin
    rootfs_dir = Sandbox.debian_rootfs()

    @testset "minimal config" begin
        config = SandboxConfig(Dict("/" => rootfs_dir))
        @test haskey(config.mounts, "/")
        @test config.mounts["/"].host_path == realpath(rootfs_dir)
        @test isempty([m for (k, m) in config.mounts if m.type == MountType.ReadWrite])
        @test isempty(config.env)
        @test config.pwd == "/"
        @test config.stdin == Base.devnull
        @test config.stdout == Base.stdout
        @test config.stderr == Base.stderr
        @test config.hostname === nothing
    end

    @testset "full options" begin
        stdout = IOBuffer()
        config = SandboxConfig(
            # read-only maps
            Dict(
                "/" => rootfs_dir,
                "/lib" => rootfs_dir,
            ),
            # read-write maps
            Dict("/workspace" => @__DIR__),
            # env
            Dict("PATH" => "/bin:/usr/bin");
            entrypoint = "/init",
            pwd = "/lib",
            persist = true,
            stdin = Base.stdout,
            stdout = stdout,
            stderr = Base.devnull,
            hostname="sandy",
        )

        # Test the old style API getting mapped to the new MountInfo API:
        @test config.mounts["/"].host_path == realpath(rootfs_dir)
        @test config.mounts["/"].type == MountType.Overlayed
        @test config.mounts["/lib"].host_path == realpath(rootfs_dir)
        @test config.mounts["/lib"].type == MountType.ReadOnly
        @test config.mounts["/workspace"].host_path == realpath(@__DIR__)
        @test config.mounts["/workspace"].type == MountType.ReadWrite
        @test config.env["PATH"] == "/bin:/usr/bin"
        @test config.entrypoint == "/init"
        @test config.pwd == "/lib"
        @test config.persist
        @test config.stdin == Base.stdout
        @test config.stdout == stdout
        @test config.stderr == Base.devnull
        @test config.hostname == "sandy"
    end

    @testset "errors" begin
        # No root dir error
        @test_throws ArgumentError SandboxConfig(Dict("/rootfs" => rootfs_dir))

        # relative dirs error
        @test_throws ArgumentError SandboxConfig(Dict("/" => rootfs_dir, "rootfs" => rootfs_dir))
        @test_throws ArgumentError SandboxConfig(Dict("/" => rootfs_dir, "/rootfs" => basename(rootfs_dir)))
        @test_throws ArgumentError SandboxConfig(Dict("/" => rootfs_dir), Dict("rootfs" => rootfs_dir))
        @test_throws ArgumentError SandboxConfig(Dict("/" => rootfs_dir), Dict("/rootfs" => basename(rootfs_dir)))
        @test_throws ArgumentError SandboxConfig(Dict("/" => rootfs_dir); pwd="lib")
        @test_throws ArgumentError SandboxConfig(Dict("/" => rootfs_dir); entrypoint="init")
    end

    using Sandbox: realpath_stem
    @testset "realpath_stem" begin
        mktempdir() do dir
            dir = realpath(dir)
            # Lay out a small tree of files and symlinks to resolve against.
            mkdir(joinpath(dir, "bar"))
            touch(joinpath(dir, "bar", "foo"))
            symlink("foo", joinpath(dir, "bar", "l_foo"))
            symlink("bar", joinpath(dir, "l_bar"))
            symlink(joinpath(dir, "l_bar", "foo"), joinpath(dir, "l_bar_foo"))

            # Test that `realpath_stem` works just like `realpath()` on existent paths:
            existent_paths = [
                joinpath(dir, "bar"),
                joinpath(dir, "bar", "foo"),
                joinpath(dir, "bar", "l_foo"),
                joinpath(dir, "l_bar"),
                joinpath(dir, "l_bar", "foo"),
                joinpath(dir, "l_bar", "l_foo"),
                joinpath(dir, "l_bar_foo"),
            ]
            for path in existent_paths
                @test realpath_stem(path) == realpath(path)
            end

            # Test that `realpath_stem` gives good answers for non-existent paths:
            non_existent_path_mappings = [
                joinpath(dir, "l_bar", "spoon") => joinpath(dir, "bar", "spoon"),
                joinpath(dir, "l_bar", "..", "l_bar", "spoon") => joinpath(dir, "bar", "spoon"),
            ]
            for (non_path, path) in non_existent_path_mappings
                @test realpath_stem(non_path) == path
            end
        end
    end
end
| Sandbox | https://github.com/staticfloat/Sandbox.jl.git |
|
[
"MIT"
] | 2.0.2 | 0f7b398a74a9fd40e7286f4ae352ba959fe52151 | code | 4450 | using Test, Sandbox
if Sys.islinux()
@test isa(Sandbox.get_kernel_version(), VersionNumber)
@test Sandbox.check_kernel_version()
end
# Run these tests only where unprivileged user namespaces are actually usable
# (they can be disabled by kernel config or sysctls).
if executor_available(UnprivilegedUserNamespacesExecutor)
@testset "UnprivilegedUserNamespacesExecutor" begin
with_executor(UnprivilegedUserNamespacesExecutor) do exe
@test probe_executor(exe)
end
end
# Can run these tests only if we can actually mount tmpfs with unprivileged executor.
@testset "Customize the tempfs size" begin
rootfs_dir = Sandbox.debian_rootfs()
mounts = Dict(
"/" => MountInfo(rootfs_dir, MountType.Overlayed),
)
env = Dict(
"PATH" => "/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin",
"HOME" => "/home/juliaci",
"USER" => "juliaci",
)
# `dd` writes a single 50M file inside the sandbox's tmpfs-backed writable layer.
cmd = `/bin/sh -c "mkdir -p /home/juliaci && cd /home/juliaci && dd if=/dev/zero of=sample.txt bs=50M count=1"`
@testset "tempfs is big enough" begin
stdout = IOBuffer()
stderr = IOBuffer()
config = SandboxConfig(mounts, env; tmpfs_size = "1G", stdout, stderr, persist=false)
with_executor(UnprivilegedUserNamespacesExecutor) do exe
# 50M fits in a 1G tmpfs: `dd` succeeds and reports its record counts on stderr.
@test success(exe, config, cmd)
@test isempty(take!(stdout))
@test startswith(strip(String(take!(stderr))), strip("""
1+0 records in
1+0 records out"""))
end
end
@testset "tempfs is too small" begin
stdout = IOBuffer()
stderr = IOBuffer()
config = SandboxConfig(mounts, env; tmpfs_size = "10M", stdout, stderr, persist=false)
with_executor(UnprivilegedUserNamespacesExecutor) do exe
# 50M cannot fit in a 10M tmpfs: `dd` must fail with ENOSPC.
@test !success(exe, config, cmd)
@test startswith(strip(String(take!(stderr))), strip("""
dd: error writing 'sample.txt': No space left on device
1+0 records in
0+0 records out"""))
end
end
end
@testset "Signal Handling" begin
# This test ensures that killing the child returns a receivable signal
config = SandboxConfig(Dict("/" => Sandbox.debian_rootfs()))
with_executor(UnprivilegedUserNamespacesExecutor) do exe
p = run(exe, config, ignorestatus(`/bin/sh -c "kill -s TERM \$\$"`))
@test p.termsignal == Base.SIGTERM
end
# This test ensures that killing the sandbox executable passes the
# signal on to the child (which then returns a receivable signal)
config = SandboxConfig(Dict("/" => Sandbox.debian_rootfs()))
with_executor(UnprivilegedUserNamespacesExecutor) do exe
stdout = IOBuffer()
stderr = IOBuffer()
# Shell script: acknowledge SIGINT, then on SIGTERM reset the TERM trap and
# re-raise it against itself so the termination signal is recorded in the status.
signal_test = """
trap "echo received SIGINT" INT
trap "echo received SIGTERM ; trap - TERM; kill -s TERM \$\$" TERM
sleep 2
"""
# We use `build_executor_command()` here so that we can use `run(; wait=false)`.
signal_cmd = pipeline(
ignorestatus(Sandbox.build_executor_command(exe, config, `/bin/sh -c "$(signal_test)"`));
stdout,
stderr
)
p = run(signal_cmd; wait=false)
sleep(0.1)
# Send SIGINT, wait a bit
kill(p, Base.SIGINT)
sleep(0.01)
# Send SIGTERM, wait for process termination
kill(p, Base.SIGTERM)
wait(p)
# Ensure that the sandbox died as we expected, but that the child process got
# the messages and responded appropriately.
@test p.termsignal == Base.SIGTERM
@test String(take!(stdout)) == "received SIGINT\nreceived SIGTERM\n"
end
end
else
@error("Skipping Unprivileged tests, as it does not seem to be available")
end
# Only test privileged runner if sudo doesn't require a password.
# (`sudo -k` drops any cached credentials and `-n` forbids prompting, so this
# command only succeeds when passwordless sudo is genuinely configured.)
if Sys.which("sudo") !== nothing && success(`sudo -k -n true`)
if executor_available(PrivilegedUserNamespacesExecutor)
@testset "PrivilegedUserNamespacesExecutor" begin
with_executor(PrivilegedUserNamespacesExecutor) do exe
@test probe_executor(exe)
end
end
else
@error("Skipping Privileged tests, as it does not seem to be available")
end
end
| Sandbox | https://github.com/staticfloat/Sandbox.jl.git |
|
[
"MIT"
] | 2.0.2 | 0f7b398a74a9fd40e7286f4ae352ba959fe52151 | code | 1953 | # If our test harness requests a local sandbox, make it so!
REPO_ROOT = dirname(@__DIR__)
should_build_local_sandbox = parse(Bool, get(ENV, "SANDBOX_BUILD_LOCAL_SANDBOX", "false"))
if should_build_local_sandbox
@info("Building local sandbox")
run(`$(Base.julia_cmd()) --project=$(Base.active_project()) $(REPO_ROOT)/deps/build_local_sandbox.jl`)
else
# Clear out any `LocalPreferences.toml` files that we may or may not have.
for prefix in (REPO_ROOT, joinpath(REPO_ROOT, "test"))
local_prefs = joinpath(prefix, "LocalPreferences.toml")
if isfile(local_prefs)
@warn("Wiping $(local_prefs) as SANDBOX_BUILD_LOCAL_SANDBOX not set...")
rm(local_prefs)
end
end
end
# Remove `SANDBOX_FORCE_MODE`, as we want to test all modes
if haskey(ENV, "FORCE_SANDBOX_MODE")
@warn("Un-setting `FORCE_SANDBOX_MODE` for tests...")
delete!(ENV, "FORCE_SANDBOX_MODE")
end
using Test, Sandbox, Scratch
# If we're on a UserNSSandbox_jll-compatible system, ensure that the sandbox is coming from where we expect.
UserNSSandbox_jll = Sandbox.UserNSSandbox_jll
if UserNSSandbox_jll.is_available()
Artifacts = Sandbox.UserNSSandbox_jll.Artifacts
sandbox_path = Sandbox.UserNSSandbox_jll.sandbox_path
@info("On a UserNSSandbox_jll-capable platform", sandbox_path)
if should_build_local_sandbox
@test startswith(UserNSSandbox_jll.sandbox_path, Scratch.scratch_dir())
else
@test any(startswith(UserNSSandbox_jll.sandbox_path, d) for d in Artifacts.artifacts_dirs())
end
end
# Ensure we're not running as root; that breaks unprivileged user namespaces testing
if Sandbox.getuid() == 0
@warn("You are running Sandbox.jl tests as root! This cannot test unprivileged namespaces!")
end
include("SandboxConfig.jl")
include("UserNamespaces.jl")
include("Docker.jl")
include("Sandbox.jl")
if Sys.islinux()
include("Nesting.jl")
include("Multiarch.jl")
end
| Sandbox | https://github.com/staticfloat/Sandbox.jl.git |
|
[
"MIT"
] | 2.0.2 | 0f7b398a74a9fd40e7286f4ae352ba959fe52151 | code | 1689 | # When we are a child, we do the following:
# 1. Set up a temp directory that we can write to.
# 2. Copy the contents of package's project from `/project` (which is read-only)
# to our temp directory (which is read-and-write).
# 3. Delete any manifest files in the temp directory (which may refer to host paths).
# 4. Instantiate the environment inside our temp directory.
original_app_directory = "/project" # this is read-only; we cannot write to this directory
new_app_directory = mktempdir(; cleanup = true) # we have write access to this directory
cp(original_app_directory, new_app_directory; force=true)
#rm.(joinpath.(Ref(new_app_directory), Base.manifest_names); force = true)
using Pkg
Pkg.activate(new_app_directory)
Pkg.instantiate()
Pkg.precompile()
# Load Sandbox, then try to launch a nested sandbox
using Sandbox, Test
rootfs_dir = Sandbox.debian_rootfs()
config = SandboxConfig(
# This rootfs was downloaded within the sandbox in the `Pkg.instantiate()` above
Dict("/" => rootfs_dir),
# Propagate our readwrite mounting into the nested sandbox
Dict{String,String}("/tmp/readwrite" => "/tmp/readwrite"),
persist=true,
verbose=true,
)
# Prove that we can write into the `readwrite` location
open("/tmp/readwrite/single_nested.txt", "w") do io
println(io, "aperture")
end
# For debugging, dump the list of mounts:
#run(`mount`)
# This should always default to the unprivileged executor, since if we're nested, `FORCE_SANDBOX_MODE` should be set
with_executor() do exe
@test success(exe, config, `/bin/sh -c "echo science > /tmp/readwrite/double_nested.txt"`)
end
@test String(read("/tmp/readwrite/double_nested.txt")) == "science\n"
| Sandbox | https://github.com/staticfloat/Sandbox.jl.git |
|
[
"MIT"
] | 2.0.2 | 0f7b398a74a9fd40e7286f4ae352ba959fe52151 | docs | 3427 | # Sandbox.jl
[![Stable][docs-stable-img]][docs-stable-url]
[![Dev][docs-dev-img]][docs-dev-url]
[![Build Status][ci-img]][ci-url]
[![Coverage][codecov-img]][codecov-url]
[docs-stable-img]: https://img.shields.io/badge/docs-stable-blue.svg
[docs-stable-url]: https://staticfloat.github.io/Sandbox.jl/stable
[docs-dev-img]: https://img.shields.io/badge/docs-dev-blue.svg
[docs-dev-url]: https://staticfloat.github.io/Sandbox.jl/dev
[ci-img]: https://github.com/staticfloat/Sandbox.jl/workflows/CI/badge.svg
[ci-url]: https://github.com/staticfloat/Sandbox.jl/actions/workflows/ci.yml
[codecov-img]: https://codecov.io/gh/staticfloat/Sandbox.jl/branch/master/graph/badge.svg
[codecov-url]: https://codecov.io/gh/staticfloat/Sandbox.jl
> The cultured host's toolkit for ill-mannered Linux guests.
This package provides basic containerization tools for running Linux guests on a variety of platforms.
As of the time of writing, it supports two execution backends:
* A Linux User Namespaces executor, which is very fast and lightweight
* A [Docker](https://www.docker.com/) (or [Podman](https://podman.io/)) executor which is slower, but more compatible (it works on macOS, and may work on Windows)
The executors are responsible for running/virtualizing a given `Cmd` within a root filesystem that is defined by the user, along with various paths that can be mounted within the sandbox.
These capabilities were originally built for [BinaryBuilder.jl](https://github.com/JuliaPackaging/BinaryBuilder.jl), however this functionality is now mature enough that it may be useful elsewhere.
## Basic usage
To make use of this toolkit, you will need to have a root filesystem image that you want to use.
This package can download a minimal Debian rootfs that can be used for quick tests; to launch `/bin/bash` in an interactive shell run the following:
```julia
using Sandbox
config = SandboxConfig(
Dict("/" => Sandbox.debian_rootfs());
stdin, stdout, stderr,
)
with_executor() do exe
run(exe, config, `/bin/bash -l`)
end
```
While this launches an interactive session due to hooking up `stdout`/`stdin`, one can easily capture output by setting `stdout` to an `IOBuffer`, or even a `PipeBuffer` to chain together multiple processes from different sandboxes.
## Getting more rootfs images
To use more interesting rootfs images, you can either create your own using tools such as [`debootstrap`](https://wiki.debian.org/Debootstrap) or you can pull one from docker by using the `pull_docker_image()` function defined within this package. See the [`contrib`](contrib/) directory for examples of both.
You can also check out the latest releases of the [`JuliaCI/rootfs-images` repository](https://github.com/JuliaCI/rootfs-images/), which curates a collection of rootfs images for use in CI workloads.
## Multiarch usage
Sandbox contains facilities for automatically registering `qemu-user-static` interpreters with `binfmt_misc` to support running on multiple architectures.
As of the time of this writing, this is only supported when running on a Linux host with the `x86_64`, `aarch64` or `powerpc64le` host architectures.
The target architectures supported are `x86_64`, `i686`, `aarch64`, `armv7l` and `powerpc64le`.
Note that while `qemu-user-static` is a marvel of modern engineering, it does still impose some performance penalties, and there may be occasional bugs that break emulation faithfulness.
| Sandbox | https://github.com/staticfloat/Sandbox.jl.git |
|
[
"MIT"
] | 2.0.2 | 0f7b398a74a9fd40e7286f4ae352ba959fe52151 | docs | 334 | # Sandbox.jl Documentation
[`Sandbox.jl`](https://github.com/staticfloat/Sandbox.jl) provides basic
containerization tools for running Linux guests on a variety of platforms.
## Index
```@index
```
## Types
```@autodocs
Modules = [Sandbox]
Order = [:type]
```
## Functions
```@autodocs
Modules = [Sandbox]
Order = [:function]
```
| Sandbox | https://github.com/staticfloat/Sandbox.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 4048 | using Documenter, Coluna, Literate, BlockDecomposition, Parameters, DocumenterMermaid, DynamicSparseArrays
JULIA_DEBUG=Documenter
TUTORIAL_GAP = joinpath(@__DIR__, "src", "start", "start.jl")
TUTORIAL_CUTS = joinpath(@__DIR__, "src", "start", "cuts.jl")
TUTORIAL_PRICING = joinpath(@__DIR__, "src", "start", "pricing.jl")
TUTORIAL_IDENTICAL_SP = joinpath(@__DIR__, "src", "start", "identical_sp.jl")
TUTORIAL_CUSTOMDATA = joinpath(@__DIR__, "src", "start", "custom_data.jl")
TUTORIAL_INITCOLS = joinpath(@__DIR__, "src", "start", "initial_columns.jl")
TUTORIAL_ADVANCED = joinpath(@__DIR__, "src", "start", "advanced_demo.jl")
TUTORIAL_STORAGE_API = joinpath(@__DIR__, "src", "api", "storage.jl")
OUTPUT_GAP = joinpath(@__DIR__, "src", "start")
OUTPUT_CUTS = joinpath(@__DIR__, "src", "start")
OUTPUT_PRICING = joinpath(@__DIR__, "src", "start")
OUTPUT_IDENTICAL_SP = joinpath(@__DIR__, "src", "start")
OUTPUT_CUSTOMDATA = joinpath(@__DIR__, "src", "start")
OUTPUT_INITCOLS = joinpath(@__DIR__, "src", "start")
OUTPUT_ADVANCED = joinpath(@__DIR__, "src", "start")
OUTPUT_STORAGE_API = joinpath(@__DIR__, "src", "api")
Literate.markdown(TUTORIAL_GAP, OUTPUT_GAP, documenter=true)
Literate.markdown(TUTORIAL_CUTS, OUTPUT_CUTS, documenter=true)
Literate.markdown(TUTORIAL_PRICING, OUTPUT_PRICING, documenter=true)
Literate.markdown(TUTORIAL_IDENTICAL_SP, OUTPUT_IDENTICAL_SP, documenter=true)
Literate.markdown(TUTORIAL_CUSTOMDATA, OUTPUT_CUSTOMDATA, documenter=true)
Literate.markdown(TUTORIAL_INITCOLS, OUTPUT_INITCOLS, documenter=true)
Literate.markdown(TUTORIAL_ADVANCED, OUTPUT_ADVANCED, documenter=true)
Literate.markdown(TUTORIAL_STORAGE_API, OUTPUT_STORAGE_API, documenter=true)
# Build the documentation site from the converted tutorials and manual pages.
makedocs(
modules = [Coluna, BlockDecomposition, DynamicSparseArrays],
checkdocs = :exports, # warn about exported names lacking a docstring
sitename = "Coluna.jl",
authors = "Atoptima & contributors",
format = Documenter.HTML(
prettyurls = get(ENV, "CI", nothing) == "true", # pretty URLs only on CI, so local builds stay browsable from disk
collapselevel = 2,
assets = ["assets/js/init.js"]
),
warnonly = true, # report build problems as warnings instead of erroring out
pages = Any[
"Introduction" => "index.md",
"Tutorials" => Any[
"Getting Started" => Any[
"Column generation" => joinpath("start", "start.md"),
"Cut Generation" => joinpath("start", "cuts.md"),
"Pricing callback" => joinpath("start", "pricing.md"),
"Identical subproblems" => joinpath("start", "identical_sp.md"),
"Custom data" => joinpath("start", "custom_data.md"),
"Initial columns callback" => joinpath("start", "initial_columns.md")
],
"Advanced tutorials" => Any[
"Column Generation and Benders on Location Routing" => joinpath("start", "advanced_demo.md"),
"Other classic problems" => joinpath("start", "other_pbs.md")
]
],
"Manual" => Any[
"Decomposition" => Any[
"Decomposition paradigms" => joinpath("man", "decomposition.md"),
"Setup decomposition using BlockDecomposition" => joinpath("man", "blockdecomposition.md")
],
"Configuration" => joinpath("man", "config.md"),
"Built-in algorithms" => joinpath("man", "algorithm.md"),
"User-defined Callbacks" => joinpath("man", "callbacks.md"),
"Presolve algorithm" => joinpath("man", "presolve.md"),
],
"API" => Any[
"Algorithms" => joinpath("api", "algos.md"),
"Benders" => joinpath("api", "benders.md"),
"Branching" => joinpath("api", "branching.md"),
"ColGen" => joinpath("api", "colgen.md"),
"Presolve" => joinpath("api", "presolve.md"),
"TreeSearch" => joinpath("api", "treesearch.md"),
"Storage" => joinpath("api", "storage.md"),
],
"Dynamic Sparse Arrays" => "dynamic_sparse_arrays.md",
"Q&A" => "qa.md",
]
)
# Deploy the generated site to the repository's gh-pages branch;
# `push_preview=true` additionally builds preview deployments for pull requests.
deploydocs(
repo = "github.com/atoptima/Coluna.jl.git",
push_preview = true
)
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 1069 | # # Storage API
# ```@meta
# CurrentModule = Coluna
# ```
# ## API
# To summarize from a developer's point of view, there is a one-to-one correspondence between
# storage unit types and record types.
# This correspondence is implemented by methods
# `record_type(StorageUnitType)` and `storage_unit_type(RecordType)`.
# The developer must also implement methods `storage_unit(StorageUnitType)` and
# `record(RecordType, id, model, storage_unit)` that must call constructors of the custom
# storage unit and one of its associated records.
# Arguments of `record` allow the developer to record the state of entities from
# both the storage unit and the model.
# Finally, the developer must implement `restore_from_record!(storage_unit, model, record)` to restore the
# state of the entities represented by the storage unit.
# Entities can be in the storage unit, the model, or both of them.
# ```@docs
# ColunaBase.record_type
# ColunaBase.storage_unit_type
# ColunaBase.storage_unit
# ColunaBase.record
# ColunaBase.restore_from_record!
# ``` | Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 41870 | # # Advanced tutorial - Location Routing
# We demonstrate the main features of Coluna on a variant of the Location Routing problem.
# In the Location Routing Problem, we are given a set of facilities and a set of customers.
# Each customer must be delivered by a route starting from one facility. Each facility has
# a setup cost, while the cost of a route is the distance traveled.
# A route is defined as a vector of locations that satisfies the following rules:
# - it must start from an open facility location
# - it can finish at any customer (open route variant)
# - its length is limited (the maximum number of visited locations is equal to a constant `nb_positions`)
# Our objective is to minimize the sum of fixed costs for opening facilities and the total traveled distance
# while ensuring that each customer is covered by a route.
# In this tutorial, we will show you how to solve this problem by applying:
# - a direct approach with JuMP and a MILP solver (without Coluna)
# - a branch-and-price algorithm provided by Coluna, which uses a custom pricing callback to optimize pricing subproblems
# - a robust branch-cut-and-price algorithm, which separates valid inequalities on the original arc variables (so-called "robust" cuts)
# - a non-robust branch-cut-and-price algorithm, which separates valid inequalities on the route variables of the Dantzig-Wolfe reformulation (so-called "non-robust" cuts)
# - a multi-stage column generation algorithm using two different pricing solvers
# - a classic Benders decomposition approach, which uses the LP relaxation of the subproblem
# For illustration purposes, we use a small instance with 2 facilities and 7 customers.
# The maximum length of a route is fixed to 4.
# We also provide a larger instance in the last section of the tutorial.
nb_positions = 4
facilities_fixed_costs = [120, 150]
facilities = [1, 2]
customers = [3, 4, 5, 6, 7, 8, 9]
arc_costs =
[
0.0 25.3 25.4 25.4 35.4 37.4 31.9 24.6 34.2;
25.3 0.0 21.2 16.2 27.1 26.8 17.8 16.7 23.2;
25.4 21.2 0.0 14.2 23.4 23.8 18.3 17.0 21.6;
25.4 16.2 14.2 0.0 28.6 28.8 22.6 15.6 29.5;
35.4 27.1 23.4 28.6 0.0 42.1 30.4 24.9 39.1;
37.4 26.8 23.8 28.8 42.1 0.0 32.4 29.5 38.2;
31.9 17.8 18.3 22.6 30.4 32.4 0.0 22.5 30.7;
24.6 16.7 17.0 15.6 24.9 29.5 22.5 0.0 21.4;
34.2 23.2 21.6 29.5 39.1 38.2 30.7 21.4 0.0;
]
locations = vcat(facilities, customers)
nb_customers = length(customers)
nb_facilities = length(facilities)
positions = 1:nb_positions;
# In this tutorial, we will use the following packages:
using JuMP, HiGHS, GLPK, BlockDecomposition, Coluna;
# We want to set an upper bound `nb_routes_per_facility` on the number of routes starting from a facility.
# This limit is computed as follows:
## We compute the minimum number of routes needed to visit all customers:
nb_routes = Int(ceil(nb_customers / nb_positions))
## We define the upper bound `nb_routes_per_facility`:
nb_routes_per_facility = min(Int(ceil(nb_routes / nb_facilities)) * 2, nb_routes)
routes_per_facility = 1:nb_routes_per_facility;
# ## Direct model
# First, we solve the problem by a direct approach, using the HiGHS solver.
# We start by creating a JuMP model:
model = JuMP.Model(HiGHS.Optimizer);
# We declare 3 types of binary variables:
## y[j] equals 1 if facility j is open; 0 otherwise.
@variable(model, y[j in facilities], Bin)
## z[u,v] equals 1 if a vehicle travels from u to v; 0 otherwise
@variable(model, z[u in locations, v in locations], Bin)
## x[i,j,k,p] equals 1 if customer i is delivered from facility j at position p of route k; 0 otherwise
@variable(model, x[i in customers, j in facilities, k in routes_per_facility, p in positions], Bin);
# We define the constraints:
## each customer is visited once
@constraint(model, cov[i in customers],
sum(x[i, j, k, p] for j in facilities, k in routes_per_facility, p in positions) == 1)
## a facility is open if there is a route starting from it
@constraint(model, setup[j in facilities, k in routes_per_facility],
sum(x[i, j, k, 1] for i in customers) <= y[j])
## flow conservation
@constraint(model, flow_conservation[j in facilities, k in routes_per_facility, p in positions; p > 1],
sum(x[i, j, k, p] for i in customers) <= sum(x[i, j, k, p-1] for i in customers))
## there is an arc between two customers whose demand is satisfied by the same route at consecutive positions
@constraint(model, route_arc[i in customers, l in customers, j in facilities, k in routes_per_facility, p in positions; p > 1 && i != l],
z[i, l] >= x[l, j, k, p] + x[i, j, k, p-1] - 1)
## there is an arc between facility `j` and the first customer visited by route `k` from facility `j`
@constraint(model, start_arc[i in customers, j in facilities, k in routes_per_facility],
z[j, i] >= x[i, j, k, 1]);
# We set the objective function:
@objective(model, Min,
sum(arc_costs[u, v] * z[u, v] for u in locations, v in locations)
+
sum(facilities_fixed_costs[j] * y[j] for j in facilities));
# and we optimize the model:
optimize!(model)
objective_value(model)
# We find an optimal solution involving two routes starting from facility 1:
# - `1` -> `8` -> `9` -> `3` -> `6`
# - `1` -> `4` -> `5` -> `7`
# ## Dantzig-Wolfe decomposition and Branch-and-Price
# One can solve the problem by exploiting its structure with a Dantzig-Wolfe decomposition approach.
# The subproblem induced by such decomposition amounts to generate routes starting from each facility.
# A possible decomposition is to consider a subproblem associated with each vehicle, generating the vehicle route.
# However, for a given facility, the vehicles that are identical will give rise to the same subproblem and route solutions.
# So instead of this decomposition with several identical subproblems for each facility, we define below a single subproblem per facility.
# For each subproblem, we define its multiplicity, i.e. we bound the number of solutions of this subproblem that can be used in a master solution.
# The following method creates the model according to the decomposition described:
## Build the Dantzig-Wolfe-decomposed model: one pricing subproblem per facility,
## each generating routes that start from that facility, solved by `pricing_algorithms`.
## Returns the model together with the `x`, `y`, `z` variables and the coverage
## constraints, which are needed later by the callbacks and cut separators.
function create_model(optimizer, pricing_algorithms)
## A user should resort to axes to communicate to Coluna how to decompose a formulation.
## For our problem, we declare an axis over the facilities, thus `facilities_axis` contain subproblem indices.
## We must use `facilities_axis` instead of `facilities` in the declaration of the
## variables and constraints that belong to pricing subproblems.
@axis(facilities_axis, collect(facilities))
## We declare a `BlockModel` instead of `Model`.
model = BlockModel(optimizer)
## `y[j]` is a master variable equal to 1 if the facility j is open; 0 otherwise
@variable(model, y[j in facilities], Bin)
## `x[i,j]` is a subproblem variable equal to 1 if customer i is delivered from facility j; 0 otherwise.
@variable(model, x[i in customers, j in facilities_axis], Bin)
## `z[u,v]` is assimilated to a subproblem variable equal to 1 if a vehicle travels from u to v; 0 otherwise.
## we don't use the `facilities_axis` axis here because the `z` variables are defined as
## representatives of the subproblem variables later in the model.
@variable(model, z[u in locations, v in locations], Bin)
## `cov` constraints are master constraints ensuring that each customer is visited once.
@constraint(model, cov[i in customers],
sum(x[i, j] for j in facilities) >= 1)
## `open_facilities` are master constraints ensuring that the depot is open if one vehicle
## leaves it.
@constraint(model, open_facility[j in facilities],
sum(z[j, i] for i in customers) <= y[j] * nb_routes_per_facility)
## We don't need to describe the subproblem constraints because we use a pricing callback.
## We set the objective function:
@objective(model, Min,
sum(arc_costs[u, v] * z[u, v] for u in locations, v in locations) +
sum(facilities_fixed_costs[j] * y[j] for j in facilities)
)
## We perform decomposition over the facilities.
@dantzig_wolfe_decomposition(model, dec, facilities_axis)
## Subproblems generate routes starting from each facility.
## The number of routes from each facility is at most `nb_routes_per_facility`.
subproblems = BlockDecomposition.getsubproblems(dec)
specify!.(subproblems, lower_multiplicity=0, upper_multiplicity=nb_routes_per_facility, solver=pricing_algorithms)
## We define `z` as a subproblem variable common to all subproblems.
## Each implicit variable `z` replaces a sum of explicit `z'` variables: `z[u,v] = sum(z'[j,u,v] for j in facilities_axis)`
## This way the model is simplified, and column generation is accelerated as the reduced cost for pair `z[u,v]` is calculated only once
## instead of performing the same reduced cost calculation for variables `z'[j,u,v]`, `j in facilities_axis`.
subproblemrepresentative.(z, Ref(subproblems))
return model, x, y, z, cov
end;
# Contrary to the direct model, we do not add constraints to ensure the
# feasibility of the routes because we solve our subproblems in a pricing callback.
# The user who implements the pricing callback has the responsibility to create only feasible routes.
# We setup Coluna:
## Coluna configuration: a branch-and-bound tree search whose nodes are conquered
## by column-and-cut generation, i.e. a branch-cut-and-price algorithm.
coluna = optimizer_with_attributes(
Coluna.Optimizer,
"params" => Coluna.Params(
solver=Coluna.Algorithm.TreeSearchAlgorithm( ## default branch-and-bound of Coluna
maxnumnodes=100,
conqueralg=Coluna.ColCutGenConquer() ## default column and cut generation of Coluna
) ## default branch-cut-and-price
),
"default_optimizer" => GLPK.Optimizer # GLPK for the master & the subproblems
);
# ### Pricing callback
# If the user declares all the necessary subproblem constraints and possibly additional subproblem variables
# to describe the set of feasible subproblem solutions, Coluna may perform automatic Dantzig-Wolfe
# decomposition in which the pricing subproblems are solved by applying a (default) MIP solver.
# In our case, applying a MIP solver is not the most efficient way to solve the pricing problem.
# Therefore, we implement an ad-hoc algorithm for solving the pricing subproblems and declare it as a pricing callback.
# In our pricing callback for a given facility, we inspect all feasible routes enumerated before calling the branch-cut-and-price algorithm.
# The inspection algorithm calculates the reduced cost for each enumerated route and returns a route with the minimum reduced cost.
# We first define a structure to store the routes:
## A candidate route: the starting facility followed by the visited customers.
mutable struct Route
length::Int # length of the route (number of visited customers + 1)
path::Vector{Int} # the facility id followed by the sequence of visited customers
end;
# We can reduce the number of enumerated routes by exploiting the following property.
# Consider two routes starting from the same facility and visiting the same subset of locations (customers).
# These two routes correspond to columns with the same vector of coefficients in master constraints.
# A solution containing the route with a larger traveled distance (i.e., larger route original cost) is dominated:
# this dominated route can be replaced by the other route without increasing the total solution cost.
# Therefore, for each subset of locations of a size not exceeding the maximum one,
# the enumeration procedure keeps only one route visiting this subset, the one with the smallest cost.
# A method that computes the cost of a route:
## Return the original (traveled-distance) cost of `route`: the sum of the arc
## costs between each pair of consecutive locations along its path.
## A route with a single location (no arcs) costs 0.0.
function route_original_cost(arc_costs, route::Route)
    stops = route.path
    return sum(
        (arc_costs[stops[k], stops[k+1]] for k in 1:(route.length - 1));
        init = 0.0,
    )
end;
# This procedure finds a least-cost sequence of visiting the given set of customers starting from a given facility.
## Find a least-cost order in which to visit the customer subset `cust_subset`
## starting from facility `facility_id`, and return the corresponding `Route`.
## Ties are broken in favor of the first permutation enumerated.
## (Idiom fixes vs. previous version: `length` instead of `size(...)[1]`, and no
## intermediate vector of (route, cost) tuples.)
function best_visit_sequence(arc_costs, cust_subset, facility_id)
    ## generate all the possible visit orders of the subset
    set_size = length(cust_subset)
    all_routes = [
        ## prepend the facility id; route length = 1 + number of visited customers
        Route(set_size + 1, vcat([facility_id], path))
        for path in multiset_permutations(cust_subset, set_size)
    ]
    ## evaluate the original cost of every candidate route
    costs = [route_original_cost(arc_costs, r) for r in all_routes]
    ## keep only the cheapest visit sequence
    return all_routes[argmin(costs)]
end;
# We are now able to compute a dominating route for all the possible customers' subsets,
# given a facility id:
using Combinatorics
## For every non-empty subset of `customers` of size at most `max_size`, compute
## the dominating (cheapest) route starting from facility `facility_id` that
## visits exactly that subset. Routes are returned ordered by subset size, then
## by combination enumeration order.
function best_route_forall_cust_subsets(arc_costs, customers, facility_id, max_size)
    subsets_by_size = (combinations(customers, k) for k in 1:max_size)
    return [
        best_visit_sequence(arc_costs, subset, facility_id)
        for subset in Iterators.flatten(subsets_by_size)
    ]
end;
# We store all the information given by the enumeration phase in a dictionary.
# For each facility id, we match a vector of routes that are the best visiting sequences
# for each possible subset of customers.
## For each facility id, pre-compute the dominating route of every possible
## customer subset of size <= `nb_positions`.
## (Note: this rebinds `routes_per_facility` — previously a range — to a
## Dict mapping facility id => vector of best routes.)
routes_per_facility = Dict(
j => best_route_forall_cust_subsets(arc_costs, customers, j, nb_positions) for j in facilities
)
# Our pricing callback must compute the reduced cost of each route,
# given the reduced cost of the subproblem variables `x` and `z`.
# Remember that subproblem variables `z` are implicitly defined by master representative variables `z`.
# We remark that `z` variables participate only in the objective function.
# Thus their reduced costs are initially equal to the original costs (i.e., objective coefficients)
# This is not true anymore after adding branching constraints and robust cuts involving variables `z`.
# We need methods to compute the contributions to the reduced cost of the `x` and `z` variables:
## Contribution of the `x` variables to the reduced cost of `route`: the sum of
## the reduced costs of `x[i, j]` over every customer `i` visited by the route
## served from facility `j`.
function x_contribution(route::Route, j::Int, x_red_costs)
    customers_on_route = route.path[2:route.length]
    return sum(
        (x_red_costs["x_$(i)_$(j)"] for i in customers_on_route);
        init = 0.0,
    )
end;
## Contribution of the `z` variables to the reduced cost of `route`: the sum of
## the reduced costs of `z[u, v]` over every arc (u, v) traveled by the route.
function z_contribution(route::Route, z_red_costs)
    stops = route.path
    return sum(
        (z_red_costs["z_$(stops[k])_$(stops[k+1])"] for k in 1:(route.length - 1));
        init = 0.0,
    )
end;
# We are now able to write our pricing callback:
## Pricing callback: for the facility tied to the current subproblem, select by
## inspection the pre-enumerated route of minimum reduced cost and submit it to
## Coluna as a candidate column, together with the subproblem dual bound.
## (Relies on the globals `model`, `x`, `z` and `routes_per_facility` defined above.)
function pricing_callback(cbdata)
## Get the id of the facility.
j = BlockDecomposition.indice(BlockDecomposition.callback_spid(cbdata, model))
## Retrieve variables reduced costs.
z_red_costs = Dict(
"z_$(u)_$(v)" => BlockDecomposition.callback_reduced_cost(cbdata, z[u, v]) for u in locations, v in locations)
x_red_costs = Dict(
"x_$(i)_$(j)" => BlockDecomposition.callback_reduced_cost(cbdata, x[i, j]) for i in customers
)
## Keep route with minimum reduced cost.
red_costs_j = map(r -> (
r,
x_contribution(r, j, x_red_costs) + z_contribution(r, z_red_costs) # the reduced cost of a route is the sum of the contribution of the variables
), routes_per_facility[j]
)
min_index = argmin([x for (_, x) in red_costs_j])
(best_route, min_reduced_cost) = red_costs_j[min_index]
## Retrieve the route's arcs.
best_route_arcs = Vector{Tuple{Int,Int}}()
for i in 1:(best_route.length-1)
push!(best_route_arcs, (best_route.path[i], best_route.path[i+1]))
end
best_route_customers = best_route.path[2:best_route.length]
## Create the solution (send only variables with non-zero values).
z_vars = [z[u, v] for (u, v) in best_route_arcs]
x_vars = [x[i, j] for i in best_route_customers]
sol_vars = vcat(z_vars, x_vars)
sol_vals = ones(Float64, length(z_vars) + length(x_vars))
sol_cost = min_reduced_cost
## Submit the solution to the subproblem to Coluna.
MOI.submit(model, BlockDecomposition.PricingSolution(cbdata), sol_cost, sol_vars, sol_vals)
## Submit the dual bound to the solution of the subproblem.
## This bound is used to compute the contribution of the subproblem to the lagrangian
## bound in column generation.
MOI.submit(model, BlockDecomposition.PricingDualBound(cbdata), sol_cost) ## optimal solution
end;
# Create the model:
model, x, y, z, _ = create_model(coluna, pricing_callback);
# Solve:
JuMP.optimize!(model)
# ### Strengthening the master with linear valid inequalities on the original variables (so-called "robust" cuts)
# To improve the quality of the linear relaxation, a family of classic facility location valid inequalities can be used:
#
# ```math
# x_{ij} \leq y_j\; \forall i \in I, \forall j \in J
# ```
# where $I$ is the set of customers and $J$ the set of facilities.
# We declare a structure representing an inequality in this family:
# One member of the facility-location valid-inequality family x[i,j] <= y[j]:
# customer `customer_id` may be assigned to facility `facility_id` only if that
# facility is open.
struct OpenFacilityInequality
    facility_id::Int
    customer_id::Int
end
# To identify violated valid inequalities from a current master LP solution,
# we proceed by enumeration (i.e. iterating over all pairs of customer and facility).
# Enumeration separation procedure is implemented in the following callback.
# Separation callback for the robust valid inequalities x[i,j] <= y[j].
# Violated pairs are found by plain enumeration over every (facility, customer)
# combination against the current master LP solution, then submitted as user cuts.
function valid_inequalities_callback(cbdata)
    ## Fetch the current LP values of the original variables.
    xval = Dict(
        "x_$(i)_$(j)" => BlockDecomposition.callback_value(cbdata, x[i, j]) for i in customers, j in facilities
    )
    yval = Dict(
        "y_$(j)" => BlockDecomposition.callback_value(cbdata, y[j]) for j in facilities
    )
    ## Collect every violated inequality, i.e. every pair with x_ij > y_j.
    violated = OpenFacilityInequality[]
    for facility in facilities
        opening = yval["y_$(facility)"]
        for customer in customers
            if xval["x_$(customer)_$(facility)"] > opening
                push!(violated, OpenFacilityInequality(facility, customer))
            end
        end
    end
    ## Submit each violated inequality to the solver.
    for cut in violated
        constr = JuMP.@build_constraint(x[cut.customer_id, cut.facility_id] <= y[cut.facility_id])
        MOI.submit(model, MOI.UserCut(cbdata), constr)
    end
end;
# We re-declare the model and optimize it with these valid inequalities:
model, x, y, z, _ = create_model(coluna, pricing_callback);
MOI.set(model, MOI.UserCutCallback(), valid_inequalities_callback);
JuMP.optimize!(model)
# ### Strengthening the master with valid inequalities on the column generation variables (so-called "non-robust" cuts)
# In order to further strengthen the linear relaxation of the Dantzig-Wolfe reformulation,
# we separate a family of subset-row cuts, which is a subfamily of Chvátal-Gomory rank-1 cuts (R1C),
# obtained from the set-partitioning constraints.
# These cuts cannot be expressed as a linear combination of the original variables of the model.
# Instead, they are expressed with the master columns variables $λ_k$, $k \in K$, where $K$ is the set of generated columns
# or set of solutions returned by the pricing subproblems.
# Subset-row cuts are "non-robust" in the sense that they modify the structure of the pricing subproblems,
# and not just the reduced cost of subproblem variables. Thus, the implementation of the pricing callback should
# be updated to take into account dual costs associated with non-robust cutting planes.
# Each Chvátal-Gomory rank-1 cut is characterized by a subset of set-partitioning constraints, or equivalently by a subset $C$ of customers,
# and a multiplier $\alpha_i$ for each customer $i\in C$:
# ```math
# \sum_{k \in K} \lfloor \sum_{i \in C} \alpha_i \tilde{x}^k_{i,j} \lambda_{k} \rfloor \leq \lfloor \sum_{i\in C} \alpha_i \rfloor, \; C \subseteq I,
# ```
# where $\tilde{x}^k_{ij}$ is the value of the variable $x_{ij}$ in the $k$-th column generated.
# For subset-row cuts, $|C|=3$, and $\alpha_i=\frac{1}{2}$, $i\in C$.
# Since we obtain subset-row cuts based on set-partitioning constraints, we must be able to
# differentiate them from the other constraints of the model.
# To do this, we exploit a feature of Coluna that allows us to attach custom data to the
# constraints and variables of a model, via the add-ons of BlockDecomposition package.
# First, we create special custom data with the only information we need to characterize
# our cover constraints: the customer id that corresponds to this constraint.
# Custom data attached to each cover (set-partitioning) constraint so it can be
# told apart from the other master constraints; carries the id of the customer
# the constraint covers.
struct CoverConstrData <: BlockDecomposition.AbstractCustomConstrData
    customer::Int
end
# We re-create the model:
(model, x, y, z, cov) = create_model(coluna, pricing_callback);
# We declare our custom data to Coluna and we attach one custom data to each cover constraint
BlockDecomposition.customconstrs!(model, CoverConstrData);
for i in customers
customdata!(cov[i], CoverConstrData(i))
end
# We perform the separation by enumeration (i.e. iterating over all subsets of customers of size three).
# The subset-row cut has the following form:
# ```math
# \sum_{k \in K} \tilde{\alpha}(C, k) \lambda_{k} \leq 1\; C \subseteq I, |C| = 3,
# ```
# where coefficient $\tilde{\alpha}(C, k)$ equals $1$ if route $k$ visits at least two customers of $C$; $0$ otherwise.
# For instance, if we consider separating a cut over constraints `cov[3]`, `cov[6]` and `cov[8]`,
# then the route `1`->`4`->`6`->`7` has a zero coefficient while the route `1`->`4`->`6`->`3`
# has a coefficient equal to one.
# Since columns are generated dynamically, we cannot pre-compute the coefficients of columns in the subset-row cuts.
# Instead, coefficients are computed dynamically via a user-defined `computecoeff` method which takes
# a cut and a column as arguments. To recognize which cut and which column are passed to the method,
# custom data structures are attached to the cut constraints and the master variables.
# When a new column is generated, Coluna computes its coefficients in the original constraints and robust cuts
# using coefficients of subproblem variables in the master constraints.
# Coluna retrieves coefficients of the new column in the non-robust cuts by calling the `computecoeff` method for the column and each such cut.
# When a new non-robust cut is generated, Coluna retrieves the coefficients of columns in this cut by calling the `computecoeff` method for the cut and all existing columns.
# We now proceed to the implementation of necessary data structures and methods needed to support the subset-row cuts.
# First, we attach a custom data structure to master columns `λ_k` associated with a given route `k`.
# They record the set of customers that are visited by the given route `k`.
# Thus, to each `λ_k`, we associate a `R1cVarData` structure that carries the customers it visits.
# Custom data attached to a master column λ_k: the customers visited by route k.
# `computecoeff` reads this to derive the column's coefficient in subset-row cuts.
struct R1cVarData <: BlockDecomposition.AbstractCustomVarData
    visited_locations::Vector{Int}
end
# Then, we attach a `R1cCutData` custom data structure to the subset-row cuts.
# It contains the set $C$ of customers characterizing the cut.
# Custom data attached to a subset-row cut: the subset C of customers (given by the
# ids of their cover constraints) that characterizes the cut.
# This is constraint-side custom data — it is registered through `customconstrs!`
# below and passed as the *constraint* argument of `computecoeff` — so it must
# subtype `AbstractCustomConstrData` (the original `AbstractCustomVarData` was a
# copy-paste slip from `R1cVarData`).
struct R1cCutData <: BlockDecomposition.AbstractCustomConstrData
    cov_constrs::Vector{Int}
end
# We declare our custom data to Coluna via BlockDecomposition add-ons:
BlockDecomposition.customvars!(model, R1cVarData)
BlockDecomposition.customconstrs!(model, [CoverConstrData, R1cCutData]);
# The next method calculates the coefficients of a column `λ_k` in a subset-row cut:
# Coefficient of a master column λ_k in a subset-row cut: the number of cut
# customers visited by the column's route, scaled by α = 1/2 and rounded down.
function Coluna.MathProg.computecoeff(
    var_custom_data::R1cVarData, constr_custom_data::R1cCutData
)
    visited_in_cut = intersect(var_custom_data.visited_locations, constr_custom_data.cov_constrs)
    return floor(length(visited_in_cut) / 2)
end
# We also need to define a second method for the case of the cover constraints.
# Indeed, we use custom data to know the customer attached to each cover constraint.
# The non-robust part contributes nothing to the coefficient of `λ_k` in a cover
# constraint, so the method returns 0.
# Non-robust contribution of a column to a cover constraint: always zero, since
# the robust coefficients of cover constraints are already computed by Coluna from
# the subproblem variables.
# NOTE(review): this method returns the Int 0 while the subset-row-cut method above
# returns a Float64 — consider `0.0` for type consistency; confirm against Coluna's
# expectations for `computecoeff` return types.
function Coluna.MathProg.computecoeff(::R1cVarData, ::CoverConstrData)
    return 0
end
# We are now able to write our rank-one cut callback completely:
# Separation callback for the subset-row (Chvátal-Gomory rank-1) cuts.
# Enumerates every subset C of customers with |C| = 3 and submits a cut whenever
# the current fractional master solution violates it (lhs > 1).
function r1c_callback(cbdata)
    original_sol = cbdata.orig_sol
    master = Coluna.MathProg.getmodel(original_sol)
    ## Retrieve the cover constraints.
    cov_constrs = Int[]
    for constr in values(Coluna.MathProg.getconstrs(master))
        constr_custom_data = Coluna.MathProg.getcustomdata(master, constr)
        if typeof(constr_custom_data) <: CoverConstrData
            push!(cov_constrs, constr_custom_data.customer)
        end
    end
    ## Retrieve the master columns λ and their values in the current fractional solution
    lambdas = Tuple{Float64,Coluna.MathProg.Variable}[]
    for (var_id, val) in original_sol
        if Coluna.MathProg.getduty(var_id) <= Coluna.MathProg.MasterCol
            push!(lambdas, (val, Coluna.MathProg.getvar(master, var_id)))
        end
    end
    ## Separate the valid subset-row cuts violated by the current solution.
    ## For a fixed subset of customers of size three, iterate on the master columns
    ## and check if lhs > 1:
    for cov_constr_subset in collect(combinations(cov_constrs, 3))
        lhs = 0
        for lambda in lambdas
            (val, var) = lambda
            var_custom_data = Coluna.MathProg.getcustomdata(master, var)
            if !isnothing(var_custom_data)
                ## Same formula as `computecoeff`: floor(1/2 * |visited ∩ C|).
                coeff = floor(1 / 2 * length(var_custom_data.visited_locations ∩ cov_constr_subset))
                lhs += coeff * val
            end
        end
        if lhs > 1
            ## Create the constraint and add it to the model.
            ## Only the (empty) robust part `0 <= 1` is passed; the coefficients of
            ## the columns are recovered by Coluna via `computecoeff` using the
            ## attached `R1cCutData`.
            MOI.submit(model,
                MOI.UserCut(cbdata),
                JuMP.ScalarConstraint(JuMP.AffExpr(0.0), MOI.LessThan(1.0)),
                R1cCutData(cov_constr_subset)
            )
        end
    end
end;
# When creating non-robust constraints, only the linear (i.e., robust) part is passed to the model.
# In our case, the constraint `0 <= 1` is passed.
# As explained above, the non-robust part is computed by calling the `computecoeff` method using
# the structure of type `R1cCutData` provided.
# Finally, we need to update our pricing callback to take into account the active non-robust cuts.
# The contribution of these cuts to the reduced cost of a column is not captured by the reduced cost
# of subproblem variables. We must therefore take this contribution into account manually, by inquiring
# the set of existing non-robust cuts and their values in the current dual solution.
# The contribution of a subset-row cut to the reduced cost of a route is managed by the following method:
# Contribution of the active subset-row cuts to the reduced cost of `route`.
#
# `custduals` holds, for each active cut, the subset C of customers characterizing
# the cut (`r1c_cov_constrs`) and the cut's dual value in the current master dual
# solution. The coefficient of the route in a cut is floor(1/2 * |path ∩ C|); the
# total contribution is the dual-weighted sum of those coefficients.
function r1c_contrib(route::Route, custduals)
    ## Float64 accumulator keeps the function type-stable (the original `cost = 0`
    ## started as Int and was promoted inside the loop). Looping directly over
    ## `custduals` also makes the previous `isempty` guard unnecessary.
    cost = 0.0
    for (r1c_cov_constrs, dual) in custduals
        coeff = floor(1 / 2 * length(route.path ∩ r1c_cov_constrs))
        cost += coeff * dual
    end
    return cost
end;
# We re-write our pricing callback to:
# - retrieve the dual cost of the subset-row cuts
# - take into account the contribution of the subset-row cuts in the reduced cost of the route
# - attach custom data to the route so that its coefficient in the existing non-robust cuts can be computed
# Pricing callback aware of the non-robust (subset-row) cuts.
# Identical to the previous pricing callback except for three changes (marked
# FIRST/SECOND/THIRD CHANGE below):
#  1. fetch the dual values of the active subset-row cuts,
#  2. subtract their contribution from each route's reduced cost,
#  3. attach `R1cVarData` to the submitted column so its coefficients in the cuts
#     can be computed by `computecoeff`.
function pricing_callback(cbdata)
    j = BlockDecomposition.indice(BlockDecomposition.callback_spid(cbdata, model))
    z_red_costs = Dict(
        "z_$(u)_$(v)" => BlockDecomposition.callback_reduced_cost(cbdata, z[u, v]) for u in locations, v in locations
    )
    x_red_costs = Dict(
        "x_$(i)_$(j)" => BlockDecomposition.callback_reduced_cost(cbdata, x[i, j]) for i in customers
    )
    ## FIRST CHANGE HERE:
    ## Get the dual values of the constraints of the specific type to compute the contributions of
    ## non-robust cuts to the cost of the solution:
    master = cbdata.form.parent_formulation
    custduals = Tuple{Vector{Int},Float64}[]
    for (_, constr) in Coluna.MathProg.getconstrs(master)
        constr_custom_data = Coluna.MathProg.getcustomdata(master, constr)
        if typeof(constr_custom_data) == R1cCutData
            ## NOTE(review): `getcurincval` is assumed to return the dual value of
            ## the cut in the current master dual solution — confirm against
            ## Coluna's API documentation.
            push!(custduals, (
                constr_custom_data.cov_constrs,
                Coluna.MathProg.getcurincval(master, constr)
            ))
        end
    end
    ## END OF FIRST CHANGE
    ## SECOND CHANGE HERE:
    ## Keep route with the minimum reduced cost: contribution of the subproblem variables and
    ## the non-robust cuts.
    red_costs_j = map(r -> (
        r,
        x_contribution(r, j, x_red_costs) + z_contribution(r, z_red_costs) - r1c_contrib(r, custduals)
    ), routes_per_facility[j]
    )
    ## END OF SECOND CHANGE
    min_index = argmin([x for (_, x) in red_costs_j])
    best_route, min_reduced_cost = red_costs_j[min_index]
    ## Rebuild the arcs of the best route, then its customer set (path[1] is the facility).
    best_route_arcs = Tuple{Int,Int}[]
    for i in 1:(best_route.length-1)
        push!(best_route_arcs, (best_route.path[i], best_route.path[i+1]))
    end
    best_route_customers = best_route.path[2:best_route.length]
    z_vars = [z[u, v] for (u, v) in best_route_arcs]
    x_vars = [x[i, j] for i in best_route_customers]
    sol_vars = vcat(z_vars, x_vars)
    sol_vals = ones(Float64, length(z_vars) + length(x_vars))
    sol_cost = min_reduced_cost
    ## Submit the solution of the subproblem to Coluna
    ## THIRD CHANGE HERE:
    ## You must attach the visited customers in the structure of type `R1cVarData` to the solution of the subproblem
    MOI.submit(
        model, BlockDecomposition.PricingSolution(cbdata), sol_cost, sol_vars, sol_vals,
        R1cVarData(best_route.path)
    )
    ## END OF THIRD CHANGE
    MOI.submit(model, BlockDecomposition.PricingDualBound(cbdata), sol_cost)
end
MOI.set(model, MOI.UserCutCallback(), r1c_callback);
JuMP.optimize!(model)
# ### Multi-stage pricing callback
# In this section, we implement a pricing heuristic that can be used together with the exact
# pricing callback to generate subproblems solutions.
# The idea of the heuristic is very simple:
# - Given a facility `j`, the heuristic finds the closest customer to `j` and adds it to the route.
# - Then, while the reduced cost keeps improving and the maximum length of the route is not reached, the heuristic computes and adds to the route the nearest neighbor to the last customer of the route.
# We first define an auxiliary function used to compute the route tail's nearest neighbor at each step:
# Extend `route` in place by appending the unvisited customer that is cheapest to
# reach from the route's current endpoint. Does nothing when every customer is
# already on the route. (Mutates `route`; a `!` suffix would be more idiomatic but
# the name is kept for compatibility with callers.)
function add_nearest_neighbor(route::Route, customers, costs)
    tail = last(route.path)
    best_customer = 0
    best_cost = Inf
    ## Scan the unvisited customers for the cheapest arc leaving `tail`.
    for candidate in customers
        candidate in route.path && continue # also skips `tail` itself
        if costs[tail, candidate] < best_cost
            best_customer = candidate
            best_cost = costs[tail, candidate]
        end
    end
    ## Append the winner, if any unvisited customer remains.
    if best_customer != 0
        push!(route.path, best_customer)
        route.length += 1
    end
end;
# We then define our heuristic for the enumeration of the routes, the method returns the best route found by the heuristic together with its cost:
# Greedy nearest-neighbor heuristic for the pricing problem of facility `j`:
# start a route at the facility and repeatedly append the cheapest-to-reach
# unvisited customer while the reduced cost keeps decreasing and the maximum route
# length `nb_positions` is not reached. Returns `(route, reduced_cost)`.
function enumeration_heuristic(x_red_costs, z_red_costs, j)
    ## Initialize our "greedy best route".
    best_route = Route(1, [j])
    ## Initialize the route's cost to zero.
    current_redcost = 0.0
    old_redcost = Inf
    ## main loop: stops as soon as extending the route no longer improves the
    ## reduced cost (which also covers the case where no customer could be added).
    while (current_redcost < old_redcost)
        add_nearest_neighbor(best_route, customers, arc_costs)
        old_redcost = current_redcost
        current_redcost = x_contribution(best_route, j, x_red_costs) +
                          z_contribution(best_route, z_red_costs)
        ## Max length is reached.
        if best_route.length == nb_positions
            break
        end
    end
    return (best_route, current_redcost)
end;
# We can now define our heuristic pricing callback:
# Heuristic pricing callback (first-stage pricing solver): builds one route with
# the greedy `enumeration_heuristic` and submits it. Because the procedure is
# inexact, the dual bound submitted is -Inf.
function approx_pricing(cbdata)
    j = BlockDecomposition.indice(BlockDecomposition.callback_spid(cbdata, model))
    z_red_costs = Dict(
        "z_$(u)_$(v)" => BlockDecomposition.callback_reduced_cost(cbdata, z[u, v]) for u in locations, v in locations
    )
    x_red_costs = Dict(
        "x_$(i)_$(j)" => BlockDecomposition.callback_reduced_cost(cbdata, x[i, j]) for i in customers
    )
    ## Call the heuristic to elect the "greedy best route":
    best_route, sol_cost = enumeration_heuristic(x_red_costs, z_red_costs, j)
    ## Build the solution:
    best_route_arcs = Vector{Tuple{Int,Int}}()
    for i in 1:(best_route.length-1)
        push!(best_route_arcs, (best_route.path[i], best_route.path[i+1]))
    end
    best_route_customers = best_route.path[2:length(best_route.path)]
    z_vars = [z[u, v] for (u, v) in best_route_arcs]
    x_vars = [x[i, j] for i in best_route_customers]
    sol_vars = vcat(z_vars, x_vars)
    sol_vals = ones(Float64, length(z_vars) + length(x_vars))
    MOI.submit(model, BlockDecomposition.PricingSolution(cbdata), sol_cost, sol_vars, sol_vals)
    ## As the procedure is inexact, no dual bound can be computed, we set it to -Inf.
    MOI.submit(model, BlockDecomposition.PricingDualBound(cbdata), -Inf)
end;
# We set the solver; `colgen_stages_pricing_solvers` indicates which solver to use first (here it is `approx_pricing`)
coluna = JuMP.optimizer_with_attributes(
Coluna.Optimizer,
"default_optimizer" => GLPK.Optimizer,
"params" => Coluna.Params(
solver=Coluna.Algorithm.BranchCutAndPriceAlgorithm(
maxnumnodes=100,
colgen_stages_pricing_solvers=[2, 1]
)
)
);
# We add the two pricing algorithms to our model:
model, x, y, z, cov = create_model(coluna, [approx_pricing, pricing_callback]);
# We declare our custom data to Coluna:
BlockDecomposition.customvars!(model, R1cVarData)
BlockDecomposition.customconstrs!(model, [CoverConstrData, R1cCutData]);
for i in customers
customdata!(cov[i], CoverConstrData(i))
end
# Optimize:
JuMP.optimize!(model)
# ## Benders decomposition
# In this section, we show how one can solve the linear relaxation of the master program of
# a Benders Decomposition approach to this facility location demo problem.
# The first-stage decisions consist in choosing a subset of facilities to open.
# The second-stage decisions consist in choosing the routes that are assigned to each facility.
# The second stage problem is an integer program, so for simplicity, we use its linear relaxation instead. To improve the quality of this
# relaxation, we enumerate the routes and use one variable per route. As this approach is practical only for small instances,
# we use it only for illustration purposes. For larger instances, we would have to implement a column generation approach
# to solve the subproblem, i.e., the Benders cut separation problem.
# In the same spirit as the above models, we use the following variables.
# Let `y[j]` equal 1 if the facility `j` is open and 0 otherwise.
# Let `λ[j,k]` equal 1 if route `k` starting from facility `j` is selected and 0 otherwise.
# Since there is only one subproblem in the second stage, we introduce a fake axis that contains
# only one element. This approach can be generalized to the case where customer demand uncertainty is expressed with scenarios.
# In this case, we would have one subproblem for each scenario, and the axis would have been defined for the set of scenarios.
# In our case, the set of scenarios consists of one "fake" scenario.
fake = 1
@axis(axis, collect(fake:fake))
coluna = JuMP.optimizer_with_attributes(
Coluna.Optimizer,
"params" => Coluna.Params(solver=Coluna.Algorithm.BendersCutGeneration()
),
"default_optimizer" => GLPK.Optimizer
)
model = BlockModel(coluna);
# We introduce auxiliary structures to improve the clarity of the code.
## routes covering customer i from facility j.
covering_routes = Dict(
(j, i) => findall(r -> (i in r.path), routes_per_facility[j]) for i in customers, j in facilities
);
## routes costs from facility j.
routes_costs = Dict(
j => [route_original_cost(arc_costs, r) for r in routes_per_facility[j]] for j in facilities
);
# We declare the variables.
@variable(model, 0 <= y[j in facilities] <= 1) ## 1st stage
@variable(model, 0 <= λ[f in axis, j in facilities, k in 1:length(routes_per_facility[j])] <= 1); ## 2nd stage
# We declare the constraints.
## Linking constraints
@constraint(model, open[fake in axis, j in facilities, k in 1:length(routes_per_facility[j])],
y[j] >= λ[fake, j, k])
## Second-stage constraints
@constraint(model, cover[fake in axis, i in customers],
sum(λ[fake, j, k] for j in facilities, k in covering_routes[(j, i)]) >= 1)
## Second-stage constraints
@constraint(model, limit_nb_routes[fake in axis, j in facilities],
sum(λ[fake, j, q] for q in 1:length(routes_per_facility[j])) <= nb_routes_per_facility
)
## First-stage constraint
## This constraint is redundant, we add it in order not to start with an empty master problem
@constraint(model, min_opening,
sum(y[j] for j in facilities) >= 1)
@objective(model, Min,
sum(facilities_fixed_costs[j] * y[j] for j in facilities) +
sum(routes_costs[j][k] * λ[fake, j, k] for j in facilities, k in 1:length(routes_per_facility[j])));
# We perform the decomposition over the axis and we optimize the problem.
@benders_decomposition(model, dec, axis)
JuMP.optimize!(model)
# ## Example of comparison of the dual bounds
# In this section, we use a larger instance with 3 facilities and 13 customers. We solve only the root node and look at the dual bound:
# - with the standard column generation (without cut separation)
# - by adding robust cuts
# - by adding non-robust cuts
# - by adding both robust and non-robust cuts
nb_positions = 6
facilities_fixed_costs = [120, 150, 110]
facilities = [1, 2, 3]
customers = [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
arc_costs = [
0.0 125.6 148.9 182.2 174.9 126.2 158.6 172.9 127.4 133.1 152.6 183.8 182.4 176.9 120.7 129.5;
123.6 0.0 175.0 146.7 191.0 130.4 142.5 139.3 130.1 133.3 163.8 127.8 139.3 128.4 186.4 115.6;
101.5 189.6 0.0 198.2 150.5 159.6 128.3 133.0 195.1 167.3 187.3 178.1 171.7 161.5 142.9 142.1;
159.4 188.4 124.7 0.0 174.5 174.0 142.6 102.5 135.5 184.4 121.6 112.1 139.9 105.5 190.9 140.7;
157.7 160.3 184.2 196.1 0.0 115.5 175.2 153.5 137.7 141.3 109.5 107.7 125.3 151.0 133.1 140.6;
145.2 120.4 106.7 138.8 157.3 0.0 153.6 192.2 153.2 184.4 133.6 164.9 163.6 126.3 121.3 161.4;
182.6 152.1 178.8 184.1 150.8 163.5 0.0 164.1 104.0 100.5 117.3 156.1 115.1 168.6 186.5 100.2;
144.9 193.8 146.1 191.4 136.8 172.7 108.1 0.0 131.0 166.3 116.4 187.0 161.3 148.2 162.1 116.0;
173.4 199.1 132.9 133.2 139.8 112.7 138.1 118.8 0.0 173.4 131.8 180.6 191.0 133.9 178.7 108.7;
150.5 171.0 163.8 171.5 116.3 149.1 124.0 192.5 188.8 0.0 112.2 188.7 197.3 144.9 110.7 186.6;
153.6 104.4 141.1 124.7 121.1 137.5 190.3 177.1 194.4 135.3 0.0 146.4 132.7 103.2 150.3 118.4;
112.5 133.7 187.1 170.0 130.2 177.7 159.2 169.9 183.8 101.6 156.2 0.0 114.7 169.3 149.9 125.3;
151.5 165.6 162.1 133.4 159.4 200.5 132.7 199.9 136.8 121.3 118.1 123.4 0.0 104.8 197.1 134.4;
195.0 101.1 194.1 160.1 147.1 164.6 137.2 138.6 166.7 191.2 169.2 186.0 171.2 0.0 106.8 150.9;
158.2 152.7 104.0 136.0 168.9 175.7 139.2 163.2 102.7 153.3 185.9 164.0 113.2 200.7 0.0 127.4;
136.6 174.3 103.2 131.4 107.8 191.6 115.1 127.6 163.2 123.2 173.3 133.0 120.5 176.9 173.8 0.0;
]
locations = vcat(facilities, customers)
nb_customers = length(customers)
nb_facilities = length(facilities)
positions = 1:nb_positions;
routes_per_facility = Dict(
j => best_route_forall_cust_subsets(arc_costs, customers, j, nb_positions) for j in facilities
);
# We set `maxnumnodes` to zero to optimize only the root node:
coluna = optimizer_with_attributes(
Coluna.Optimizer,
"params" => Coluna.Params(
solver=Coluna.Algorithm.TreeSearchAlgorithm(
maxnumnodes=0,
conqueralg=Coluna.ColCutGenConquer()
)
),
"default_optimizer" => GLPK.Optimizer
);
# We define a method to call both `valid_inequalities_callback` and `r1c_callback`:
# Combined separation callback: run the robust valid-inequalities separation
# first, then the non-robust subset-row cut separation.
function cuts_callback(cbdata)
    valid_inequalities_callback(cbdata)
    r1c_callback(cbdata)
end
# Register the custom variable/constraint data types with Coluna and tag every
# cover constraint with the id of the customer it covers.
function attach_data(model, cov)
    BlockDecomposition.customvars!(model, R1cVarData)
    BlockDecomposition.customconstrs!(model, [CoverConstrData, R1cCutData])
    foreach(i -> customdata!(cov[i], CoverConstrData(i)), customers)
end;
# First, we solve the root node with the "raw" decomposition model:
model, x, y, z, cov = create_model(coluna, pricing_callback)
attach_data(model, cov)
# dual bound found after optimization = 1588.00
# Then, we re-solve it with the robust cuts:
model, x, y, z, cov = create_model(coluna, pricing_callback)
attach_data(model, cov)
MOI.set(model, MOI.UserCutCallback(), valid_inequalities_callback);
# dual bound found after optimization = 1591.55
# And with non-robust cuts:
model, x, y, z, cov = create_model(coluna, pricing_callback)
attach_data(model, cov)
MOI.set(model, MOI.UserCutCallback(), r1c_callback);
# dual bound found after optimization = 1598.26
# Finally we add both robust and non-robust cuts:
model, x, y, z, cov = create_model(coluna, pricing_callback)
attach_data(model, cov)
MOI.set(model, MOI.UserCutCallback(), cuts_callback);
# dual bound found after optimization = 1600.63
| Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 9052 | # # [Custom Variables and Cuts](@id tuto_custom_data)
#
# Coluna allows users to attach custom data to variables and constraints.
# This data is useful to store information about the variables or constraints in a custom
# format much easier to process than extracted information from the formulation
# (coefficient matrix, bounds, costs, and right-hand side).
#
# In this example, we will show how to attach custom data to variables and constraints and
# use them to separate non-robust cuts. We will use the Bin Packing problem as an example.
#
# Let us consider a Bin Packing problem with only 3 items such that any pair of items
# fits into one bin but the 3 items do not. The objective function is to minimize the number
# of bins being used. Pricing is done by inspection over the 6 combinations of items (3 pairs and 3
# singletons). The master LP solution has 1.5 bins at the root node,
# each 0.5 corresponding to a bin with one of the possible pairs of items.
#
# In this example, we will show you how to use non-robust cuts to improve the master LP
# solution at the root node.
# Obviously, Coluna is able to solve this instance by branching on the
# number of bins, but the limit of one node in the branch-and-bound tree prevents it
# from being solved without cuts.
#
# We define the dependencies:
using JuMP, BlockDecomposition, Coluna, GLPK;
# We define the solver.
coluna = JuMP.optimizer_with_attributes(
Coluna.Optimizer,
"default_optimizer" => GLPK.Optimizer,
"params" => Coluna.Params(
solver = Coluna.Algorithm.TreeSearchAlgorithm(
conqueralg = Coluna.Algorithm.ColCutGenConquer(
colgen = Coluna.Algorithm.ColumnGeneration(
pricing_prob_solve_alg = Coluna.Algorithm.SolveIpForm(
optimizer_id = 1
))
),
maxnumnodes = 1 # we only treat the root node.
)
)
);
# Let's define the model.
# Let's $B$ the set of bins and $I$ the set of items.
# We introduce variable $y_b$ that is equal to 1 if a bin $b$ is used and 0 otherwise.
# We introduce variable $x_{b,i}$ that is equal to 1 if item $i$ is put in a bin $b$ and 0 otherwise.
model = BlockModel(coluna);
# We must assign three items:
I = [1, 2, 3];
# And we have three bins:
B = [1, 2, 3];
# Each bin is defining a subproblem, we declare our axis:
@axis(axis, collect(B));
# We declare subproblem variables `y[b]`:
@variable(model, y[b in axis], Bin);
# And `x[b,i]`:
@variable(model, x[b in axis, i in I], Bin);
# Each item must be assigned to one bin:
@constraint(model, sp[i in I], sum(x[b,i] for b in axis) == 1);
# We minimize the number of bins and we declare the decomposition:
@objective(model, Min, sum(y[b] for b in axis))
@dantzig_wolfe_decomposition(model, dec, axis);
# ## Custom data for non-robust cuts
# As said previously, at the end of the column generation at the root node,
# the master LP solution has 1.5 bins. It corresponds to three bins, each of them used 0.5 times
# containing one pair `(1,2)`, `(1, 3)`, or `(2, 3)` of items.
# We are going to introduce the following non-robust cut to make the master LP solution integral:
# $$\sum\limits_{s \in S~if~length(s) \geq 2} λ_s \leq 1$$
#
# where :
# - $S$ is the set of possible bin assignments generated by the pricing problem.
# - $length(s)$ the number of items in bin assignment $s \in S$.
# This cut means that we cannot have more than one bin with at least two items.
# But the problem is that the cut is expressed over the master column and we don't have
# access to these variables from the JuMP model.
# To address this problem, Coluna offers a way to compute the coefficient of a column in a
# constraint by implementing the following method:
#
# ```@docs
# Coluna.MathProg.computecoeff
# ```
#
#
# We therefore need to attach custom data to the master columns and the non-robust cut to
# use the method `compute_coeff`.
#
# For every subproblem solution $s$, we define custom data with the number of items in the bin.
# Custom data attached to each generated column: the number of items packed in
# the corresponding bin assignment. `computecoeff` reads it to decide whether the
# column enters the non-robust cut.
struct MyCustomVarData <: BlockDecomposition.AbstractCustomVarData
    nb_items::Int
end
BlockDecomposition.customvars!(model, MyCustomVarData);
# We define custom data for the cut that will contain the minimum number of items
# in a bin that can be used. The value will be `2` in this example.
# Custom data attached to the non-robust cut: the minimum number of items a bin
# assignment must contain for its column to get a nonzero coefficient in the cut
# (2 in this example).
struct MyCustomCutData <: BlockDecomposition.AbstractCustomConstrData
    min_items::Int
end
BlockDecomposition.customconstrs!(model, MyCustomCutData);
# We implement the `computecoeff` method for the custom data we defined.
# Coefficient of a master column in the non-robust cut: 1.0 when the column's bin
# assignment holds at least `min_items` items, 0.0 otherwise.
function Coluna.MathProg.computecoeff(
    var_custom_data::MyCustomVarData, constr_custom_data::MyCustomCutData
)
    large_enough = var_custom_data.nb_items >= constr_custom_data.min_items
    return large_enough ? 1.0 : 0.0
end
# ## Pricing callback
# We define the pricing callback that will generate the bin with best-reduced cost.
# Be careful, when using non-robust cuts, you must take into account the contribution of the
# non-robust cuts to the reduced cost of your solution.
# Pricing callback for bin `b`: prices by inspection over the six feasible bin
# assignments, taking into account both the reduced costs of the subproblem
# variables and the dual values of the active non-robust cuts, then submits the
# best assignment as a column tagged with `MyCustomVarData`.
function my_pricing_callback(cbdata)
    ## Get the reduced costs of the original variables.
    I = [1, 2, 3]
    b = BlockDecomposition.callback_spid(cbdata, model)
    rc_y = BlockDecomposition.callback_reduced_cost(cbdata, y[b])
    rc_x = [BlockDecomposition.callback_reduced_cost(cbdata, x[b, i]) for i in I]
    ## Get the dual values of the custom cuts (to calculate contributions of
    ## non-robust cuts to the cost of the solution).
    ## NOTE(review): fields are read directly (`constr.custom_data`) here, whereas
    ## other callbacks in these tutorials go through the
    ## `Coluna.MathProg.getcustomdata` accessor — consider unifying.
    custduals = Tuple{Int, Float64}[]
    for (_, constr) in Coluna.MathProg.getconstrs(cbdata.form.parent_formulation)
        if typeof(constr.custom_data) == MyCustomCutData
            push!(custduals, (
                constr.custom_data.min_items,
                Coluna.MathProg.getcurincval(cbdata.form.parent_formulation, constr)
            ))
        end
    end
    ## Pricing by inspection.
    sols = [[1], [2], [3], [1, 2], [1, 3], [2, 3]]
    best_s = Int[]
    best_rc = Inf
    for s in sols
        rc_s = rc_y + sum(rc_x[i] for i in s) # reduced cost of the subproblem variables
        if !isempty(custduals)
            ## contribution of the non-robust cuts
            rc_s -= sum((length(s) >= minits) ? dual : 0.0 for (minits, dual) in custduals)
        end
        if rc_s < best_rc
            best_rc = rc_s
            best_s = s
        end
    end
    @show best_s
    ## build the best one and submit
    solcost = best_rc
    solvars = JuMP.VariableRef[]
    solvarvals = Float64[]
    for i in best_s
        push!(solvars, x[b, i])
        push!(solvarvals, 1.0)
    end
    push!(solvars, y[b])
    push!(solvarvals, 1.0)
    ## submit the solution
    MOI.submit(
        model, BlockDecomposition.PricingSolution(cbdata),
        solcost,
        solvars,
        solvarvals,
        MyCustomVarData(length(best_s)) # attach a custom data to the column
    )
    MOI.submit(model, BlockDecomposition.PricingDualBound(cbdata), solcost)
    return
end
# The pricing callback is done, we define it as the solver of our pricing problem.
subproblems = BlockDecomposition.getsubproblems(dec)
BlockDecomposition.specify!.(
subproblems,
solver = my_pricing_callback
);
# ## Non-robust cut separation callback.
# We now define the cut separation callback for our non-robust cut.
# This is the same callback as the one used for robust cuts.
# There is just one slight difference when you submit the non-robust cut.
# Since cuts are expressed over the master variables and these variables are inaccessible from
# the JuMP model, you'll submit a constraint with an empty left-hand side and you'll leave Coluna
# populate the left-hand side with the values returned by `Coluna.MathProg.computecoeff`.
# So let's define the callback.
# Basically, if the solution uses more than one bin with two items,
# The cut is added to the model.
# Separation callback for the non-robust cut
#   sum(λ_s for assignments s with >= 2 items) <= 1.
# The violation is computed as lhs - rhs, hence the -1.0 initialization; the cut
# is submitted when the violation exceeds a small tolerance.
function custom_cut_sep(cbdata)
    ## Compute the constraint violation by iterating over the master solution.
    viol = -1.0
    for (varid, varval) in cbdata.orig_sol
        var = Coluna.MathProg.getvar(cbdata.form, varid)
        ## Only master columns carry custom data; other variables are skipped.
        if !isnothing(var.custom_data)
            if var.custom_data.nb_items >= 2
                viol += varval
            end
        end
    end
    ## Add the cut (at most one variable with 2 or more of the 3 items) if violated.
    if viol > 0.001
        MOI.submit(
            model, MOI.UserCut(cbdata),
            JuMP.ScalarConstraint(
                JuMP.AffExpr(0.0), # We cannot express the left-hand side so we push 0.
                MOI.LessThan(1.0)
            ),
            MyCustomCutData(2) # Cut custom data.
        )
    end
    return
end
MOI.set(model, MOI.UserCutCallback(), custom_cut_sep)
JuMP.optimize!(model)
# We see on the output that the algorithm has converged a first time before a cut is added.
# Coluna then starts a new iteration taking into account the cut.
# We notice here an improvement of the value of the dual bound: before the cut,
# we converge towards 1.5. After the cut, we reach 2.0.
| Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 4199 | # # [Valid inequalities](@id tuto_cut_callback)
# Now let us consider a variant of the Generalized Assignment Problem in which we have to
# pay `f[m]` to use machine `m`.
# Consider the following instance:
J = 1:10
M = 1:5
c = [10.13 15.6 15.54 13.41 17.08;19.58 16.83 10.75 15.8 14.89;14.23 17.36 16.05 14.49 18.96;16.47 16.38 18.14 15.46 11.64;17.87 18.25 13.12 19.16 16.33;11.09 16.76 15.5 12.08 13.06;15.19 13.86 16.08 19.47 15.79;10.79 18.96 16.11 19.78 15.55;12.03 19.03 16.01 14.46 12.77;14.48 11.75 16.97 19.95 18.32];
w = [5, 4, 5, 6, 8, 9, 5, 8, 10, 7];
Q = [25, 24, 31, 28, 24];
f = [105, 103, 109, 112, 100];
# We define the dependencies:
using JuMP, BlockDecomposition, Coluna, GLPK;
# We parametrize the solver.
# We solve only the root node of the branch-and-bound tree and we use a column and cut
# generation algorithm to conquer (optimize) this node.
coluna = JuMP.optimizer_with_attributes(
Coluna.Optimizer,
"params" => Coluna.Params(
solver = Coluna.Algorithm.TreeSearchAlgorithm(
conqueralg = Coluna.Algorithm.ColCutGenConquer(
max_nb_cut_rounds = 20
),
branchingtreefile = "tree2.dot",
maxnumnodes = 1
)
),
"default_optimizer" => GLPK.Optimizer
);
# ## Column generation
# We write the model:
model = BlockModel(coluna; direct_model = true);
@axis(M_axis, M)
@variable(model, x[j in J, m in M_axis], Bin);
@variable(model, y[m in M_axis], Bin);
@constraint(model, setpartitioning[j in J], sum(x[j,m] for m in M_axis) == 1);
@constraint(model, knp[m in M_axis], sum(w[j]*x[j,m] for j in J) <= Q[m] * y[m]);
@objective(model, Min, sum(c[j,m] * x[j,m] for m in M_axis, j in J) + sum(f[m] * y[m] for m in M_axis));
@dantzig_wolfe_decomposition(model, dec, M_axis);
sp = getsubproblems(dec);
specify!.(sp, lower_multiplicity = 0);
# We optimize:
optimize!(model)
# The final dual bound is:
db1 = objective_bound(model)
# ## Strengthen with valid inequalities
# Let `H` be the set of configurations of open machines (`h[m] = 1` if machine m open; `0` otherwise)
# such that all jobs can be assigned : `sum(h'Q) >= sum(w)`
# i.e. the total capacity of the open machines must exceed the total weight of the jobs.
H = Vector{Int}[]
## Enumerate every non-empty machine subset: `digits(k, base = 2, pad = |M|)`
## yields the open (1) / closed (0) flag of each machine for subset number k.
for h in digits.(1:(2^length(M) - 1), base=2, pad=length(M))
    ## Keep only configurations whose total open capacity covers all job weights.
    if sum(h'Q) >= sum(w)
        push!(H, h)
    end
end
H
# Let `ȳ` be the solution to the linear relaxation of the problem.
# Let us try to express `ȳ` as a linear expression of the configurations.
# If `ȳ ∈ conv H`, we can derive a cut because the optimal integer solution to the problem uses one of the configurations of H.
# We need MathOptInterface to define the cut callback:
using MathOptInterface
# The separation algorithm looks for the non-negative coefficients `χ[k]`, `k = 1:length(H)`, :
# `max sum(χ[k] for k in 1:length(H))` such that `sum(χ[k] * h for (k, h) in enumerate(H)) <= ȳ`.
# If the objective value is less than 1, we must add a cut.
# Since the separation algorithm is a linear program, strong duality applies.
# So we separate these cuts with the dual.
fc_sep_m = Model(GLPK.Optimizer)
@variable(fc_sep_m, ψ[m in M] >= 0) # one variable for each constraint
@constraint(fc_sep_m, config_dual[h in H], ψ'h >= 1) # one constraint for each χ[k]
MathOptInterface.set(fc_sep_m, MathOptInterface.Silent(), true)
# The objective is `min ȳ'ψ` = `sum(χ[k] for k in 1:length(H))`.
# Let `ψ*` be an optimal solution to the dual. If `ȳ'ψ* < 1`, then `ψ*'y >= 1` is a valid inequality.
function fenchel_cuts_separation(cbdata)
    println("Fenchel cuts separation callback...")
    ## Current (possibly fractional) value of y in the master relaxation.
    ȳ = [callback_value(cbdata, y[m]) for m in M_axis]
    @objective(fc_sep_m, Min, ȳ'ψ) # update objective of the separation LP
    optimize!(fc_sep_m)
    ## A separation value < 1 means ȳ lies outside conv(H):
    ## the inequality ψ*'y >= 1 then cuts ȳ off.
    if objective_value(fc_sep_m) < 1
        con = @build_constraint(value.(ψ)'y >= 1) # valid inequality.
        MathOptInterface.submit(model, MathOptInterface.UserCut(cbdata), con)
    end
end
MathOptInterface.set(model, MathOptInterface.UserCutCallback(), fenchel_cuts_separation);
# We optimize:
optimize!(model)
# Valid inequalities significantly improve the previous dual bound:
db2 = objective_bound(model)
db2
| Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 3459 | # # [Identical subproblems](@id tuto_identical_sp)
# Let us see an example of resolution using the advantage of identical subproblems with Dantzig-Wolfe and a variant of the Generalized Assignment Problem.
# Consider a set of machine type `T = 1:nb_machine_types` and a set of jobs `J = 1:nb_jobs`.
# A machine type `t` has a resource capacity `Q[t]` and the factory contains `U[t]` machines of type `t`.
# A job `j` assigned to a machine of type `t` has a cost `c[t,j]` and consumes `w[t,j]` resource units of the machine of type `t`.
# Consider the following instance :
nb_machine_types = 2;
nb_jobs = 8;
J = 1:nb_jobs;
Q = [10, 15];
U = [3, 2]; # 3 machines of type 1 & 2 machines of type 2
c = [10 11 13 11 12 14 15 8; 20 21 23 21 22 24 25 18];
w = [4 4 5 4 4 3 4 5; 5 5 6 5 5 4 5 6];
#Here is the JuMP model to optimize this instance with a classic solver :
using JuMP, GLPK;
T1 = [1, 2, 3]; # U[1] machines
T2 = [4, 5]; # U[2] machines
M = union(T1, T2);
m2t = [1, 1, 1, 2, 2]; # machine id -> type id
model = Model(GLPK.Optimizer);
@variable(model, x[M, J], Bin); # 1 if job j assigned to machine m
@constraint(model, cov[j in J], sum(x[m,j] for m in M) == 1);
@constraint(model, knp[m in M], sum(w[m2t[m],j] * x[m,j] for j in J) <= Q[m2t[m]]);
@objective(model, Min, sum(c[m2t[m],j] * x[m,j] for m in M, j in J));
optimize!(model);
objective_value(model)
# You can decompose over the machines by defining an axis on `M`.
# However, if you want to take advantage of the identical subproblems, you must
# define the formulation as follows :
using BlockDecomposition, Coluna, JuMP, GLPK;
const BD = BlockDecomposition
coluna = optimizer_with_attributes(
Coluna.Optimizer,
"params" => Coluna.Params(
solver = Coluna.Algorithm.TreeSearchAlgorithm() # default BCP
),
"default_optimizer" => GLPK.Optimizer # GLPK for the master & the subproblems
);
@axis(T, 1:nb_machine_types);
model = BlockModel(coluna);
@variable(model, x[T, J], Bin); # 1 if job j assigned to machine m
@constraint(model, cov[j in J], sum(x[t,j] for t in T) == 1);
@constraint(model, knp[t in T], sum(w[t] * x[t,j] for j in J) <= Q[t]);
@objective(model, Min, sum(c[t,j] * x[t,j] for t in T, j in J));
# We assign jobs to a type of machine and we define one knapsack constraint for
# each type. This formulation cannot be solved as it stands with a commercial solver.
#
# Then, we decompose and specify the multiplicity of each knapsack subproblem :
@dantzig_wolfe_decomposition(model, dec_on_types, T);
sps = getsubproblems(dec_on_types)
for t in T
specify!(sps[t], lower_multiplicity = 0, upper_multiplicity = U[t]);
end
getsubproblems(dec_on_types)
# We see that subproblem for machine type 1 has an upper multiplicity equals to 3,
# and the second subproblem for machine type 2 has an upper multiplicity equals to 2.
# It means that we can use at most 3 machines of type 1 and at most 2 machines of type 2.
# We can then optimize
optimize!(model);
# and retrieve the disaggregated solution
for t in T
    ## Each pattern is one subproblem solution (a set of jobs) with a usage count.
    for pattern in BD.getsolutions(model, t)
        usage = BD.value(pattern)
        ## `Any[...]` keeps the printed representation of the job list unchanged.
        selected_jobs = Any[j for j in J if BD.value(pattern, x[t, j]) ≈ 1]
        println("Pattern of machine type $t used $usage times : $selected_jobs")
    end
end
| Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 2884 | # # Initial columns
# The initial columns callback let you provide initial columns associated to each problem
# ahead the optimization.
# This callback is useful when you have an efficient heuristic that finds feasible solutions
# to the problem. You can then extract columns from the solutions and give them to Coluna
# through the callback.
# You have to make sure the columns you provide are feasible because Coluna won't check their
# feasibility.
# The cost of the columns will be computed using the perennial cost of subproblem variables.
# Let us see an example with the following generalized assignment problem :
M = 1:3;
J = 1:5;
c = [1 1 1 1 1; 1.2 1.2 1.1 1.1 1; 1.3 1.3 1.1 1.2 1.4];
Q = [3, 2, 3];
# with the following Coluna configuration
using JuMP, GLPK, BlockDecomposition, Coluna;
coluna = optimizer_with_attributes(
Coluna.Optimizer,
"params" => Coluna.Params(
solver = Coluna.Algorithm.TreeSearchAlgorithm() # default branch-cut-and-price
),
"default_optimizer" => GLPK.Optimizer # GLPK for the master & the subproblems
);
# for which the JuMP model takes the form:
@axis(M_axis, M);
model = BlockModel(coluna);
@variable(model, x[m in M_axis, j in J], Bin);
@constraint(model, cov[j in J], sum(x[m, j] for m in M_axis) >= 1);
@constraint(model, knp[m in M_axis], sum(x[m, j] for j in J) <= Q[m]);
@objective(model, Min, sum(c[m, j] * x[m, j] for m in M_axis, j in J));
@dantzig_wolfe_decomposition(model, decomposition, M_axis)
subproblems = getsubproblems(decomposition)
specify!.(subproblems, lower_multiplicity = 0, upper_multiplicity = 1)
# Let's consider that the following assignment patterns are good candidates:
machine1 = [[1,2,4], [1,3,4], [2,3,4], [2,3,5]];
machine2 = [[1,2], [1,5], [2,5], [3,4]];
machine3 = [[1,2,3], [1,3,4], [1,3,5], [2,3,4]];
initial_columns = [machine1, machine2, machine3];
# We can write the initial columns callback:
function initial_columns_callback(cbdata)
    ## Index of the subproblem being addressed (one of the values in M_axis).
    machine = BlockDecomposition.callback_spid(cbdata, model)
    println("initial columns callback $machine")
    ## Turn each candidate assignment pattern of this machine into a column.
    for pattern in initial_columns[machine]
        column_vars = [x[machine, j] for j in pattern]
        column_vals = ones(Float64, length(pattern))
        ## Submit the column
        MOI.submit(model, BlockDecomposition.InitialColumn(cbdata), column_vars, column_vals)
    end
end
# The initial columns callback is a function.
# It takes as argument `cbdata` which is a data structure
# that allows the user to interact with Coluna within the callback.
# We provide the initial columns callback to Coluna through the following method:
MOI.set(model, BlockDecomposition.InitialColumnsCallback(), initial_columns_callback)
# You can then optimize:
optimize!(model)
| Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 4414 | # # [Pricing callback](@id tuto_pricing_callback)
# The pricing callback lets you define how to solve the subproblems of a Dantzig-Wolfe
# decomposition to generate a new entering column in the master program.
# This callback is useful when you know an efficient algorithm to solve the subproblems,
# i.e. an algorithm better than solving the subproblem with a MIP solver.
# First, we load the packages and define aliases :
using Coluna, BlockDecomposition, JuMP, MathOptInterface, GLPK;
const BD = BlockDecomposition;
const MOI = MathOptInterface;
# Let us see an example with the following generalized assignment problem :
M = 1:3;
J = 1:15;
c = [12.7 22.5 8.9 20.8 13.6 12.4 24.8 19.1 11.5 17.4 24.7 6.8 21.7 14.3 10.5; 19.1 24.8 24.4 23.6 16.1 20.6 15.0 9.5 7.9 11.3 22.6 8.0 21.5 14.7 23.2; 18.6 14.1 22.7 9.9 24.2 24.5 20.8 12.9 17.7 11.9 18.7 10.1 9.1 8.9 7.7; 13.1 16.2 16.8 16.7 9.0 16.9 17.9 12.1 17.5 22.0 19.9 14.6 18.2 19.6 24.2];
w = [61 70 57 82 51 74 98 64 86 80 69 79 60 76 78; 50 57 61 83 81 79 63 99 82 59 83 91 59 99 91;91 81 66 63 59 81 87 90 65 55 57 68 92 91 86; 62 79 73 60 75 66 68 99 69 60 56 100 67 68 54];
Q = [1020 1460 1530];
# with the following Coluna configuration
coluna = optimizer_with_attributes(
Coluna.Optimizer,
"params" => Coluna.Params(
solver = Coluna.Algorithm.TreeSearchAlgorithm() # default BCP
),
"default_optimizer" => GLPK.Optimizer # GLPK for the master & the subproblems
);
# for which the JuMP model takes the form:
model = BlockModel(coluna);
@axis(M_axis, M);
@variable(model, x[m in M_axis, j in J], Bin);
@constraint(model, cov[j in J], sum(x[m,j] for m in M_axis) == 1);
@objective(model, Min, sum(c[m,j]*x[m,j] for m in M_axis, j in J));
@dantzig_wolfe_decomposition(model, dwdec, M_axis);
# where, as you can see, we omitted the knapsack constraints.
# These constraints are implicitly defined by the algorithm called in the pricing callback.
# Let's use a knapsack algorithm defined by the following function to solve the knapsack
# subproblems:
function solve_knapsack(cost, weight, capacity)
    ## Small MIP: pick the item subset of minimum cost within the capacity.
    knp = Model(GLPK.Optimizer)
    n = length(weight)
    @variable(knp, selected[1:n], Bin)
    @constraint(knp, sum(weight[i] * selected[i] for i in 1:n) <= capacity)
    @objective(knp, Min, sum(cost[i] * selected[i] for i in 1:n))
    optimize!(knp)
    picked = value.(selected)
    ## Return the indices of the items taken in the optimal solution.
    return [i for i in 1:n if picked[i] ≈ 1]
end
# You can replace the content of the function with any algorithm that solves the knapsack
# problem (such as algorithms provided by the unregistered package
# [Knapsacks](https://github.com/rafaelmartinelli/Knapsacks.jl)).
# The pricing callback is a function.
# It takes as argument `cbdata` which is a data structure
# that allows the user to interact with Coluna within the pricing callback.
function my_pricing_callback(cbdata)
    ## Retrieve the index of the subproblem (it will be one of the values in M_axis)
    cur_machine = BD.callback_spid(cbdata, model)
    ## Uncomment to see that the pricing callback is called.
    ## println("Pricing callback for machine $(cur_machine).")
    ## Retrieve reduced costs of subproblem variables
    red_costs = [BD.callback_reduced_cost(cbdata, x[cur_machine, j]) for j in J]
    ## Run the knapsack algorithm
    jobs_assigned_to_cur_machine = solve_knapsack(red_costs, w[cur_machine, :], Q[cur_machine])
    ## Create the solution (send only variables with non-zero values)
    sol_vars = [x[cur_machine, j] for j in jobs_assigned_to_cur_machine]
    sol_vals = [1.0 for _ in jobs_assigned_to_cur_machine]
    ## `init = 0.0` keeps the cost well-defined when the knapsack selects no job
    ## (all reduced costs non-negative): the empty column has zero reduced cost.
    ## The previous generator-based `sum` threw on an empty selection.
    sol_cost = sum(red_costs[jobs_assigned_to_cur_machine]; init = 0.0)
    ## Submit the solution to the subproblem to Coluna
    MOI.submit(model, BD.PricingSolution(cbdata), sol_cost, sol_vars, sol_vals)
    ## Submit the dual bound to the solution of the subproblem
    ## This bound is used to compute the contribution of the subproblem to the lagrangian
    ## bound in column generation.
    MOI.submit(model, BD.PricingDualBound(cbdata), sol_cost) # optimal solution
    return
end
# The pricing callback is provided to Coluna using the keyword `solver` in the method
# `specify!`.
subproblems = BD.getsubproblems(dwdec);
BD.specify!.(subproblems, lower_multiplicity = 0, solver = my_pricing_callback);
# You can then optimize :
optimize!(model);
# and retrieve the information you need as usual :
objective_value(model)
| Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 6456 | # # [Column generation with the Generalized Assignment Problem](@id tuto_gen_assignement)
# This quick start guide introduces the main features of Coluna through the example of the
# Generalized Assignment Problem.
# ## Classic model solved with MIP solver
# Consider a set of machines `M` and a set of jobs `J`.
# A machine $m$ has a resource capacity $Q_m$ .
# A job $j$ assigned to a machine $m$ has a cost $c_{mj}$ and consumes $w_{mj}$ resource units
# of the machine $m$.
# The goal is to minimize the sum of job costs while assigning each job to a machine and not
# exceeding the capacity of each machine.
# Let $x_{mj}$ equal to one if job $j$ is assigned to machine $m$; $0$ otherwise.
# The problem has the original formulation:
# ```math
# \begin{alignedat}{4}
# \text{[GAP]} \equiv \min \mathrlap{\sum_{m \in M}\sum_{j \in J} c_{mj} x_{mj}} \\
# \text{s.t.} && \sum_{m \in M} x_{mj} &= 1 \quad& j \in J \\
# && \sum_{j \in J} w_{mj} x_{mj} &\leq Q_m \quad \quad& m \in M \\
# && x_{mj} &\in \{0,1\} &m \in M,\; j \in J
# \end{alignedat}
# ```
# Let us consider the following instance.
M = 1:3;
J = 1:15;
c = [12.7 22.5 8.9 20.8 13.6 12.4 24.8 19.1 11.5 17.4 24.7 6.8 21.7 14.3 10.5; 19.1 24.8 24.4 23.6 16.1 20.6 15.0 9.5 7.9 11.3 22.6 8.0 21.5 14.7 23.2; 18.6 14.1 22.7 9.9 24.2 24.5 20.8 12.9 17.7 11.9 18.7 10.1 9.1 8.9 7.7; 13.1 16.2 16.8 16.7 9.0 16.9 17.9 12.1 17.5 22.0 19.9 14.6 18.2 19.6 24.2];
w = [61 70 57 82 51 74 98 64 86 80 69 79 60 76 78; 50 57 61 83 81 79 63 99 82 59 83 91 59 99 91;91 81 66 63 59 81 87 90 65 55 57 68 92 91 86; 62 79 73 60 75 66 68 99 69 60 56 100 67 68 54];
Q = [1020 1460 1530];
# We write the model with [JuMP](https://github.com/jump-dev/JuMP.jl), a domain-specific modeling
# language for mathematical optimization embedded in Julia. We optimize with GLPK.
# If you are not familiar with the JuMP package, you may want to check its
# [documentation](https://jump.dev/JuMP.jl/stable/).
using JuMP, GLPK;
# A JuMP model for the original formulation is:
model = Model(GLPK.Optimizer)
@variable(model, x[m in M, j in J], Bin);
@constraint(model, cov[j in J], sum(x[m, j] for m in M) >= 1);
@constraint(model, knp[m in M], sum(w[m, j] * x[m, j] for j in J) <= Q[m]);
@objective(model, Min, sum(c[m, j] * x[m, j] for m in M, j in J));
# We optimize the instance and retrieve the objective value.
optimize!(model);
objective_value(model)
# ## Try column generation easily with Coluna and BlockDecomposition
# This model has a block structure: each knapsack constraint defines
# an independent block and the set-partitioning constraints couple these independent
# blocks. By applying the Dantzig-Wolfe reformulation, each knapsack constraint forms
# a tractable subproblem and the set-partitioning constraints are handled in a master problem.
# To write the model, you need JuMP and BlockDecomposition.
# The latter is an extension built on top of JuMP to model Dantzig-Wolfe and Benders decompositions.
# You will find more documentation about BlockDecomposition in the
# [Decomposition & reformulation](@ref)
# To optimize the problem, you need Coluna and a Julia package that provides a MIP solver such as GLPK.
# Since we have already loaded JuMP and GLPK, we just need:
using BlockDecomposition, Coluna;
# Next, you instantiate the solver and define the algorithm that you use to optimize the problem.
# In this case, the algorithm is a classic branch-and-price provided by Coluna.
coluna = optimizer_with_attributes(
Coluna.Optimizer,
"params" => Coluna.Params(
solver = Coluna.Algorithm.TreeSearchAlgorithm() # default branch-cut-and-price
),
"default_optimizer" => GLPK.Optimizer # GLPK for the master & the subproblems
);
# In BlockDecomposition, an axis is an index set of subproblems.
# Let `M_axis` be the index set of machines; it defines an axis along which we can implement the
# desired decomposition.
@axis(M_axis, M);
# In this example, the axis `M_axis` defines one knapsack subproblem for each machine.
# For instance, the first machine index is 1 and is of type `BlockDecomposition.AxisId`:
M_axis[1]
typeof(M_axis[1])
# Jobs are not involved in the decomposition, set `J` of jobs thus stays as a classic
# range.
# The model takes the form:
model = BlockModel(coluna);
# You can write `BlockModel(coluna; direct_model = true)` to pass names of variables
# and constraints to Coluna.
@variable(model, x[m in M_axis, j in J], Bin);
@constraint(model, cov[j in J], sum(x[m, j] for m in M_axis) >= 1);
@constraint(model, knp[m in M_axis], sum(w[m, j] * x[m, j] for j in J) <= Q[m]);
@objective(model, Min, sum(c[m, j] * x[m, j] for m in M_axis, j in J));
# This is the same model as above except that we use a `BlockModel` instead of a `Model` and
# `M_axis` as the set of machines instead of `M`.
# Therefore, BlockDecomposition will know which variables and constraints are involved in subproblems
# because one of their indices is a `BlockDecomposition.AxisId`.
# You then apply a Dantzig-Wolfe decomposition along `M_axis`:
@dantzig_wolfe_decomposition(model, decomposition, M_axis)
# where `decomposition` is a variable that contains information about the decomposition.
decomposition
# Once the decomposition is defined, you can retrieve the master and the subproblems to give
# additional information to the solver.
master = getmaster(decomposition)
subproblems = getsubproblems(decomposition)
# The multiplicity of a subproblem is the number of times that the same independent block
# shaped by the subproblem appears in the model. This multiplicity also specifies the number of
# solutions to the subproblem that can appear in the solution to the original problem.
# In this GAP instance, the upper multiplicity is $1$ because every subproblem is different,
# *i.e.*, every machine is different and used at most once.
# The lower multiplicity is $0$ because a machine may stay unused.
# The multiplicity specifications take the form:
specify!.(subproblems, lower_multiplicity = 0, upper_multiplicity = 1)
getsubproblems(decomposition)
# The model is now fully defined. To solve it, you need to call:
optimize!(model)
# You can find more information about the output of the column generation algorithm [ColumnGeneration](@ref).
# Finally, you can retrieve the solution to the original formulation with JuMP methods.
# For example, if we want to know if job 3 is assigned to machine 1:
value(x[1,3])
| Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 1720 | module Coluna
# Base functions for which we define more methods in Coluna
import Base: isempty, hash, isequal, length, iterate, getindex, lastindex,
getkey, delete!, setindex!, haskey, copy, promote_rule, convert, isinteger,
push!, filter, diff, hcat, in
import BlockDecomposition, MathOptInterface, TimerOutputs
using Base.Threads, Dates, DynamicSparseArrays, Logging, Parameters, Printf, TOML
const BD = BlockDecomposition
const MOI = MathOptInterface
const TO = TimerOutputs
### Default parameters values
const DEF_OPTIMALITY_ATOL = 1e-5 # default absolute optimality tolerance
const DEF_OPTIMALITY_RTOL = 1e-9 # default relative optimality tolerance
const TOL = 1e-8 # if - ϵ_tol < val < ϵ_tol, we consider val = 0
const TOL_DIGITS = 8 # because round(val, digits = n) where n is from 1e-n
const MAX_NB_ELEMS = typemax(Int32) # max number of variables or constraints.
###
# submodules
export Algorithm, ColunaBase, MathProg, Env, DefaultOptimizer, Parameters,
elapsed_optim_time
const _to = TO.TimerOutput()
version() = v"0.8.1"
include("kpis.jl")
include("parameters.jl")
include("env.jl")
export Env
include("MustImplement/MustImplement.jl")
using .MustImplement
include("ColunaBase/ColunaBase.jl")
using .ColunaBase
include("MathProg/MathProg.jl")
using .MathProg
include("interface.jl")
using .AlgoAPI
include("TreeSearch/TreeSearch.jl")
using .TreeSearch
include("Branching/Branching.jl")
using .Branching
include("ColGen/ColGen.jl")
using .ColGen
include("Benders/Benders.jl")
using .Benders
include("Algorithm/Algorithm.jl")
using .Algorithm
include("annotations.jl")
include("optimize.jl")
# Wrapper functions
include("MOIwrapper.jl")
include("MOIcallbacks.jl")
include("decomposition.jl")
# For testing purposes
include("Tests/Tests.jl")
end # module
| Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 8241 | ############################################################################################
# Set callbacks
############################################################################################
# Register a user-cut separation callback (facultative robust cuts) on the
# original formulation.
function MOI.set(model::Coluna.Optimizer, attr::MOI.UserCutCallback, callback_function)
    model.has_usercut_cb = true
    orig_form = get_original_formulation(model.inner)
    _register_callback!(orig_form, attr, callback_function)
    return
end
# Register a lazy-constraint separation callback (essential robust cuts) on the
# original formulation.
function MOI.set(model::Coluna.Optimizer, attr::MOI.LazyConstraintCallback, callback_function)
    model.has_lazyconstraint_cb = true
    orig_form = get_original_formulation(model.inner)
    _register_callback!(orig_form, attr, callback_function)
    return
end
# Only record that a pricing callback exists; the callback function itself is
# registered later through the annotations.
function MOI.set(model::Coluna.Optimizer, ::BD.PricingCallback, ::Nothing)
    model.has_pricing_cb = true
    # We register the pricing callback through the annotations.
    return
end
# Register the initial-columns callback on the problem.
function MOI.set(model::Coluna.Optimizer, ::BD.InitialColumnsCallback, callback_function::Function)
    model.has_initialcol_cb = true
    problem = model.inner
    MathProg._register_initcols_callback!(problem, callback_function)
    return
end
############################################################################################
# Pricing Callback #
############################################################################################
# Build a `PrimalSolution` from the (cost, variables, values) triple submitted by
# a pricing callback and store it in the callback data.
# NOTE(review): when the subproblem has a setup variable, `1.0` is pushed onto the
# caller-provided `values` vector (in-place mutation of the user's array) — confirm
# callers do not reuse that vector afterwards.
function _submit_pricing_solution(env, cbdata, cost, variables, values, custom_data)
    form = cbdata.form
    solval = cost
    # Translate original MOI variable indices into Coluna variable ids.
    colunavarids = Coluna.MathProg.VarId[
        _get_varid_of_origvar_in_form(env, form, v) for v in variables
    ]
    # setup variable: include it in the column (value 1.0) and add its current
    # cost to the solution value.
    setup_var_id = form.duty_data.setup_var
    if !isnothing(setup_var_id)
        push!(colunavarids, setup_var_id)
        push!(values, 1.0)
        solval += getcurcost(form, setup_var_id)
    end
    if !isnothing(solval)
        sol = PrimalSolution(
            form, colunavarids, values, solval, FEASIBLE_SOL;
            custom_data = custom_data
        )
        push!(cbdata.primal_solutions, sol)
    end
    return
end
# MOI entry point for `BlockDecomposition.PricingSolution`: forwards the
# submitted column to `_submit_pricing_solution`.
function MOI.submit(
    model::Optimizer,
    cb::BD.PricingSolution{MathProg.PricingCallbackData},
    cost::Float64,
    variables::Vector{MOI.VariableIndex},
    values::Vector{Float64},
    custom_data::Union{Nothing, BD.AbstractCustomVarData} = nothing
)
    return _submit_pricing_solution(model.env, cb.callback_data, cost, variables, values, custom_data)
end
# Record the dual bound reported by a pricing callback, shifted by the current
# cost of the subproblem's setup variable (which the callback does not see).
# `bound === nothing` means the callback could not compute a dual bound.
function _submit_dual_bound(cbdata, bound)
    setup_var_cur_cost = if !isnothing(cbdata.form.duty_data.setup_var)
        getcurcost(cbdata.form, cbdata.form.duty_data.setup_var)
    else
        0.0 # Float64 zero keeps both branches the same type (was `0`::Int).
    end
    if !isnothing(bound)
        cbdata.dual_bound = bound + setup_var_cur_cost
    else
        cbdata.dual_bound = nothing
    end
    cbdata.nb_times_dual_bound_set += 1
    return
end
# MOI entry point for `BlockDecomposition.PricingDualBound`: records the dual
# bound computed by the pricing callback.
function MOI.submit(
    ::Optimizer,
    cb::BD.PricingDualBound{MathProg.PricingCallbackData},
    bound
)
    return _submit_dual_bound(cb.callback_data, bound)
end
# Return the axis index value (e.g. the machine id) of the subproblem handled by
# the current pricing callback, looked up via the formulation's annotation.
function MOI.get(model::Optimizer, spid::BD.PricingSubproblemId{MathProg.PricingCallbackData})
    callback_data = spid.callback_data
    uid = getuid(callback_data.form)
    axis_index_value = model.annotations.ann_per_form[uid].axis_index_value
    return axis_index_value
end
# Current cost of an original variable inside the pricing subproblem
# (i.e. its reduced cost during column generation).
function _get_pricing_var_cost(env::Env, cbdata, x)
    form = cbdata.form
    return getcurcost(form, _get_orig_varid(env, x))
end
function MOI.get(
    model::Optimizer, pvc::BD.PricingVariableCost{MathProg.PricingCallbackData},
    x::MOI.VariableIndex
)
    return _get_pricing_var_cost(model.env, pvc.callback_data, x)
end
# Current lower bound of an original variable inside the pricing subproblem.
function _get_pricing_var_lb(env::Env, cbdata, x)
    form = cbdata.form
    return getcurlb(form, _get_orig_varid(env, x))
end
function MOI.get(
    model::Optimizer, pvlb::BD.PricingVariableLowerBound{MathProg.PricingCallbackData},
    x::MOI.VariableIndex
)
    return _get_pricing_var_lb(model.env, pvlb.callback_data, x)
end
# Current upper bound of an original variable inside the pricing subproblem.
function _get_pricing_var_ub(env::Env, cbdata, x)
    form = cbdata.form
    return getcurub(form, _get_orig_varid(env, x))
end
function MOI.get(
    model::Optimizer, pvub::BD.PricingVariableUpperBound{MathProg.PricingCallbackData},
    x::MOI.VariableIndex
)
    return _get_pricing_var_ub(model.env, pvub.callback_data, x)
end
############################################################################################
# Robust Constraints Callback #
############################################################################################
# User cuts are facultative robust constraints: they strengthen the formulation
# but are not required for correctness.
function _register_callback!(form::Formulation, ::MOI.UserCutCallback, sep::Function)
    set_robust_constr_generator!(form, Facultative, sep)
    return
end
# Lazy constraints are essential robust constraints: the formulation is
# incomplete without them.
function _register_callback!(form::Formulation, ::MOI.LazyConstraintCallback, sep::Function)
    set_robust_constr_generator!(form, Essential, sep)
    return
end
# Value of an original variable in the projected master solution; variables
# absent from the projection default to 0.0.
function MOI.get(
    model::Optimizer, cvp::MOI.CallbackVariablePrimal{Algorithm.RobustCutCallbackContext},
    x::MOI.VariableIndex
)
    return get(cvp.callback_data.proj_sol_dict, _get_orig_varid(model.env, x), 0.0)
end
# Add a robust cut (user cut or lazy constraint) submitted by a separation
# callback to the formulation stored in the callback context, and record its
# violation at the current projected solution.
function MOI.submit(
    model::Optimizer,
    cb::Union{MOI.UserCut{Algorithm.RobustCutCallbackContext}, MOI.LazyConstraint{Algorithm.RobustCutCallbackContext}},
    func::MOI.ScalarAffineFunction{Float64},
    set::Union{MOI.LessThan{Float64}, MOI.GreaterThan{Float64}, MOI.EqualTo{Float64}},
    custom_data::Union{Nothing, BD.AbstractCustomConstrData} = nothing
)
    form = cb.callback_data.form
    rhs = MathProg.convert_moi_rhs_to_coluna(set)
    sense = MathProg.convert_moi_sense_to_coluna(set)
    lhs = 0.0 # activity of the cut at the current projected solution
    members = Dict{VarId, Float64}()
    # Robust terms: the coefficients the callback gave explicitly.
    for term in func.terms
        varid = _get_varid_of_origvar_in_form(model.env, form, term.variable)
        members[varid] = term.coefficient
        lhs += term.coefficient * get(cb.callback_data.proj_sol_dict, varid, 0.0)
    end
    constr = setconstr!(
        form, "", MasterUserCutConstr;
        rhs = rhs,
        kind = cb.callback_data.constrkind,
        sense = sense,
        members = members,
        loc_art_var_abs_cost = cb.callback_data.env.params.local_art_var_cost,
        custom_data = custom_data
    )
    # Non-robust terms: coefficients of variables carrying custom data are
    # provided by `computecoeff`.
    # NOTE(review): `custom_data` may be `nothing` here — confirm that every
    # `computecoeff` method accepts a `nothing` constraint data.
    for (varid, var) in getvars(form)
        if !isnothing(var.custom_data)
            lhs += MathProg.computecoeff(var.custom_data, custom_data)
        end
    end
    # Record a non-negative violation measure consistent with the cut's sense.
    gap = lhs - rhs
    if sense == Less
        push!(cb.callback_data.viol_vals, max(0.0, gap))
    elseif sense == Greater
        push!(cb.callback_data.viol_vals, -min(0.0, gap))
    else
        push!(cb.callback_data.viol_vals, abs(gap))
    end
    return getid(constr)
end
# Coluna supports both robust-cut callback types.
MOI.supports(::Optimizer, ::MOI.UserCutCallback) = true
MOI.supports(::Optimizer, ::MOI.LazyConstraintCallback) = true
############################################################################################
# Initial columns Callback #
############################################################################################
# Submit an initial column: its cost is computed from the perennial costs of the
# subproblem variables, then the column goes through the regular
# pricing-solution machinery (`_submit_pricing_solution`).
function _submit_initial_solution(env, cbdata, variables, values, custom_data)
    @assert length(variables) == length(values)
    form = cbdata.form
    colunavarids = [_get_varid_of_origvar_in_form(env, form, v) for v in variables]
    # `init = 0.0` keeps the cost well-defined for an empty column; the previous
    # generator-based `sum` threw on an empty `variables` vector.
    cost = sum(
        (value * getperencost(form, varid) for (varid, value) in Iterators.zip(colunavarids, values));
        init = 0.0
    )
    return _submit_pricing_solution(env, cbdata, cost, variables, values, custom_data)
end
# MOI entry point for `BlockDecomposition.InitialColumn`: forwards a user-supplied
# initial column to `_submit_initial_solution`.
function MOI.submit(
    model::Optimizer,
    cb::BD.InitialColumn{MathProg.InitialColumnsCallbackData},
    variables::Vector{MOI.VariableIndex},
    values::Vector{Float64},
    custom_data::Union{Nothing, BD.AbstractCustomVarData} = nothing
)
    return _submit_initial_solution(model.env, cb.callback_data, variables, values, custom_data)
end
# Return the axis index value of the subproblem handled by the current
# initial-columns callback, looked up via the formulation's annotation.
function MOI.get(model::Optimizer, spid::BD.PricingSubproblemId{MathProg.InitialColumnsCallbackData})
    callback_data = spid.callback_data
    uid = getuid(callback_data.form)
    axis_index_value = model.annotations.ann_per_form[uid].axis_index_value
    return axis_index_value
end
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 47068 | const CleverDicts = MOI.Utilities.CleverDicts
CleverDicts.index_to_key(::Type{Int}, index) = index
CleverDicts.key_to_index(key::Int) = key
@enum(ObjectiveType, SINGLE_VARIABLE, SCALAR_AFFINE, ZERO)
@enum(_VarKind, _CONT, _INT, _BINARY)
@enum(_VarBound, _LESS, _GREATER, _EQUAL, _INTERVAL, _NONE)
# Bookkeeping for a MOI variable: bound kinds, integrality kind, MOI index, name,
# the underlying Coluna variable, and optional user-attached custom data.
mutable struct _VarInfo
    lb_type::_VarBound
    ub_type::_VarBound
    kind::_VarKind
    index::MOI.VariableIndex
    name::String
    var::Variable
    data::Union{Nothing, BlockDecomposition.AbstractCustomVarData}
end
# Fresh info for a variable: no bounds, continuous, placeholder index, no name.
_VarInfo(var::Variable) = _VarInfo(_NONE, _NONE, _CONT, MOI.VariableIndex(0), "", var, nothing)
# Bookkeeping for a MOI constraint: name, MOI index (set lazily), the underlying
# Coluna constraint, and optional user-attached custom data.
mutable struct _ConstrInfo
    name::String
    index::Union{Nothing, MOI.ConstraintIndex}
    constr::Constraint
    data::Union{Nothing, BlockDecomposition.AbstractCustomConstrData}
end
# Fresh info for a constraint: no name, index assigned later.
_ConstrInfo(constr::Constraint) = _ConstrInfo("", nothing, constr, nothing)
# Coluna's MOI optimizer: wraps the inner `Problem`, the solver environment,
# the decomposition annotations, and all MOI-level bookkeeping (variables,
# constraints, names, callbacks, results).
mutable struct Optimizer <: MOI.AbstractOptimizer
    env::Env
    inner::Problem
    is_objective_set::Bool
    objective_type::ObjectiveType
    objective_sense::Union{Nothing, MOI.OptimizationSense}
    annotations::Annotations
    varinfos::CleverDicts.CleverDict{MOI.VariableIndex, _VarInfo}
    moi_varids::Dict{VarId, MOI.VariableIndex}
    constrinfos::CleverDicts.CleverDict{Int, _ConstrInfo} # ScalarAffineFunction{Float64}-in-Set storage.
    result::OptimizationState
    disagg_result::Union{Nothing, OptimizationState} # disaggregated (per-subproblem) solution, if any
    default_optimizer_builder::Union{Nothing, Function}
    # Names management
    # name -> (index of the first variable that has the name, nb of vars with this name)
    names_to_vars::Dict{String, Tuple{MOI.VariableIndex, Int}}
    # Same for constraints (the first int is the id).
    names_to_constrs::Dict{String, Tuple{Int, Int}}
    # Callbacks
    has_pricing_cb::Bool
    has_usercut_cb::Bool
    has_lazyconstraint_cb::Bool
    has_initialcol_cb::Bool
    # Build an empty optimizer with a fresh environment and problem.
    function Optimizer()
        model = new()
        model.env = Env{VarId}(Params())
        model.inner = Problem(model.env)
        model.is_objective_set = false
        model.objective_type = ZERO
        model.objective_sense = nothing
        model.annotations = Annotations()
        model.varinfos = CleverDicts.CleverDict{MOI.VariableIndex, _VarInfo}()
        model.moi_varids = Dict{VarId, MOI.VariableIndex}()
        model.constrinfos = CleverDicts.CleverDict{Int, _ConstrInfo}()
        model.result = OptimizationState(get_optimization_target(model.inner))
        model.disagg_result = nothing
        model.default_optimizer_builder = nothing
        model.names_to_vars = Dict{String, Tuple{MOI.VariableIndex,Int}}()
        model.names_to_constrs = Dict{String, Tuple{Int,Int}}()
        model.has_pricing_cb = false
        model.has_usercut_cb = false
        model.has_lazyconstraint_cb = false
        model.has_initialcol_cb = false
        return model
    end
end
# Standard MOI identification attributes.
MOI.get(::Optimizer, ::MOI.SolverName) = "Coluna"
MOI.get(::Optimizer, ::MOI.SolverVersion) = string(Coluna.version())
############################################################################################
# Empty.
############################################################################################
# Reset the optimizer to its freshly-constructed state, keeping only the
# environment parameters and the default optimizer builder (re-installed on
# the new `Problem`). Mirrors the field initialization in `Optimizer()`.
function MOI.empty!(model::Optimizer)
    model.env.varids = CleverDicts.CleverDict{MOI.VariableIndex, VarId}()
    model.inner = Problem(model.env)
    model.is_objective_set = false
    model.objective_type = ZERO
    model.objective_sense = nothing
    model.annotations = Annotations()
    model.varinfos = CleverDicts.CleverDict{MOI.VariableIndex, _VarInfo}()
    model.moi_varids = Dict{VarId, MOI.VariableIndex}()
    model.constrinfos = CleverDicts.CleverDict{Int, _ConstrInfo}()
    model.result = OptimizationState(get_optimization_target(model.inner))
    model.disagg_result = nothing
    # The new `Problem` must be given the user's default optimizer again.
    if model.default_optimizer_builder !== nothing
        set_default_optimizer_builder!(model.inner, model.default_optimizer_builder)
    end
    model.names_to_vars = Dict{String, Tuple{MOI.VariableIndex, Int}}()
    model.names_to_constrs = Dict{String, Tuple{Int, Int}}()
    model.has_pricing_cb = false
    model.has_usercut_cb = false
    model.has_lazyconstraint_cb = false
    model.has_initialcol_cb = false
    return
end
# The model is empty when no reformulation has been built, the original
# formulation holds no variables or constraints, and no objective was set.
function MOI.is_empty(model::Optimizer)
    model.inner.re_formulation === nothing || return false
    model.is_objective_set && return false
    origform = model.inner.original_formulation
    return length(getvars(origform)) == 0 && length(getconstrs(origform)) == 0
end
############################################################################################
# Methods to get variable and constraint info.
############################################################################################
# Fetch the `_VarInfo` of a variable, throwing `MOI.InvalidIndex` when unknown.
function _info(model::Optimizer, key::MOI.VariableIndex)
    haskey(model.varinfos, key) || throw(MOI.InvalidIndex(key))
    return model.varinfos[key]
end

# A `VariableIndex`-in-set constraint shares its value with the variable index,
# so its info record is the variable's.
function _info(model::Optimizer, key::MOI.ConstraintIndex{MOI.VariableIndex, S}) where {S}
    return _info(model, MOI.VariableIndex(key.value))
end

# Fetch the `_ConstrInfo` of a scalar-affine constraint.
function _info(model::Optimizer, key::MOI.ConstraintIndex{MOI.ScalarAffineFunction{Float64}})
    haskey(model.constrinfos, key.value) || throw(MOI.InvalidIndex(key))
    return model.constrinfos[key.value]
end

# Fetch a `_ConstrInfo` by its raw integer storage key.
function _info(model::Optimizer, key::Int)
    haskey(model.constrinfos, key) || throw(MOI.InvalidIndex(key))
    return model.constrinfos[key]
end
############################################################################################
# Supported variables, constraints, and objectives.
############################################################################################
# Objective / set / function types Coluna accepts through MOI.
const SupportedObjFunc = Union{MOI.ScalarAffineFunction{Float64}, MOI.VariableIndex}
const SupportedVarSets = Union{
    MOI.ZeroOne, MOI.Integer, MOI.LessThan{Float64}, MOI.EqualTo{Float64},
    MOI.GreaterThan{Float64}, MOI.Interval{Float64}
}
const SupportedConstrFunc = Union{MOI.ScalarAffineFunction{Float64}}
const SupportedConstrSets = Union{
    MOI.EqualTo{Float64}, MOI.GreaterThan{Float64}, MOI.LessThan{Float64}
}
# Coluna supports incremental model building (add/delete/modify, no copy needed).
MOI.supports_incremental_interface(::Optimizer) = true
MOI.supports_constraint(::Optimizer, ::Type{<:SupportedConstrFunc}, ::Type{<:SupportedConstrSets}) = true
MOI.supports_constraint(::Optimizer, ::Type{MOI.VariableIndex}, ::Type{<:SupportedVarSets}) = true
MOI.supports(::Optimizer, ::MOI.ObjectiveFunction{<:SupportedObjFunc}) = true
MOI.supports(::Optimizer, ::MOI.ObjectiveSense) = true
# Warm-start information on constraints is not supported.
MOI.supports(::Optimizer, ::MOI.ConstraintPrimalStart) = false
MOI.supports(::Optimizer, ::MOI.ConstraintDualStart) = false
# BlockDecomposition attributes (decomposition annotations & custom data).
MOI.supports(::Optimizer, ::BlockDecomposition.ConstraintDecomposition) = true
MOI.supports(::Optimizer, ::BlockDecomposition.VariableDecomposition) = true
MOI.supports(::Optimizer, ::BlockDecomposition.RepresentativeVar) = true
MOI.supports(::Optimizer, ::BlockDecomposition.CustomVarValue) = true
MOI.supports(::Optimizer, ::BlockDecomposition.CustomConstrValue) = true
# Parameters
# Raw parameters: "params" installs a Coluna `Params` object; "default_optimizer"
# wraps the given MOI optimizer (factory) as the default subproblem optimizer.
# Unknown names only emit a warning (they are not errors).
function MOI.set(model::Optimizer, param::MOI.RawOptimizerAttribute, val)
    if param.name == "params"
        model.env.params = val
    elseif param.name == "default_optimizer"
        # NOTE(review): relies on the MOI-internal `_instantiate_and_check`;
        # may break across MOI versions.
        optimizer_builder = () -> MoiOptimizer(MOI._instantiate_and_check(val))
        model.default_optimizer_builder = optimizer_builder
        set_default_optimizer_builder!(model.inner, optimizer_builder)
    else
        @warn("Unknown parameter $(param.name).")
    end
    return
end
# Return the Coluna `VarId` of original variable `x`, or throw `MOI.InvalidIndex`.
function _get_orig_varid(env::Env, x::MOI.VariableIndex)
    haskey(env.varids, x) || throw(MOI.InvalidIndex(x))
    return env.varids[x]
end

# Resolve the id that original variable `x` carries inside formulation `form`.
function _get_varid_of_origvar_in_form(
    env::Env, form::Formulation, x::MOI.VariableIndex
)
    original_id = _get_orig_varid(env, x)
    return getid(getvar(form, original_id))
end
# Run Coluna on the stored problem; keeps both the aggregated result and the
# disaggregated (per-column) result used by `BD.getsolutions`.
function MOI.optimize!(model::Optimizer)
    model.result, model.disagg_result = optimize!(
        model.env, model.inner, model.annotations
    )
    return
end

# Generic copy support via MOI utilities (incremental interface is preferred).
function MOI.copy_to(dest::Optimizer, src::MOI.ModelLike)
    return MOI.Utilities.default_copy_to(dest, src)
end
############################################################################################
# Add variables
############################################################################################
# See https://jump.dev/JuMP.jl/stable/moi/tutorials/implementing/#Dealing-with-multiple-variable-bounds
# to understand the purpose of _throw_if_existing_* methods.
# Throw `MOI.LowerBoundAlreadySet` when the variable already has a lower bound
# coming from another MOI set; no-op otherwise. `S` is the set being added.
function _throw_if_existing_lower(
    bound::_VarBound,
    ::Type{S},
    variable::MOI.VariableIndex,
) where {S<:MOI.AbstractSet}
    existing =
        bound == _GREATER  ? MOI.GreaterThan{Float64} :
        bound == _INTERVAL ? MOI.Interval{Float64} :
        bound == _EQUAL    ? MOI.EqualTo{Float64} :
        nothing
    existing === nothing && return
    throw(MOI.LowerBoundAlreadySet{existing,S}(variable))
end

# Same check for the upper bound side (`_LESS` instead of `_GREATER`).
function _throw_if_existing_upper(
    bound::_VarBound,
    ::Type{S},
    variable::MOI.VariableIndex,
) where {S<:MOI.AbstractSet}
    existing =
        bound == _LESS     ? MOI.LessThan{Float64} :
        bound == _INTERVAL ? MOI.Interval{Float64} :
        bound == _EQUAL    ? MOI.EqualTo{Float64} :
        nothing
    existing === nothing && return
    throw(MOI.UpperBoundAlreadySet{existing,S}(variable))
end
# Create a Coluna variable in the original formulation, register its info
# record, and keep the MOI index <-> Coluna id maps in sync.
function MOI.add_variable(model::Optimizer)
    orig_form = get_original_formulation(model.inner)
    var = setvar!(orig_form, "", OriginalVar)
    varinfo = _VarInfo(var)
    index = CleverDicts.add_item(model.varinfos, varinfo)
    varinfo.index = index
    model.moi_varids[getid(var)] = index
    # Both CleverDicts must assign the same index for the same variable.
    index2 = CleverDicts.add_item(model.env.varids, getid(var))
    @assert index == index2
    return index
end
############################################################################################
# Add constraint
############################################################################################
# Apply a `VariableIndex`-in-set constraint to the Coluna variable, updating the
# info record's bound/kind state so MOI's multiple-bound errors can be raised.

# Mark the variable integer.
function _add_constraint_on_variable!(
    form::Formulation, varinfo::_VarInfo, ::MOI.Integer
)
    setperenkind!(form, varinfo.var, Integ)
    varinfo.kind = _INT
    return
end
# Mark the variable binary.
function _add_constraint_on_variable!(
    form::Formulation, varinfo::_VarInfo, ::MOI.ZeroOne
)
    setperenkind!(form, varinfo.var, Binary)
    varinfo.kind = _BINARY
    return
end
# Lower bound; errors if a lower bound is already set by another set type.
function _add_constraint_on_variable!(
    form::Formulation, varinfo::_VarInfo, set::MOI.GreaterThan{Float64}
)
    _throw_if_existing_lower(varinfo.lb_type, MOI.GreaterThan{Float64}, varinfo.index)
    MathProg.setperenlb!(form, varinfo.var, set.lower)
    varinfo.lb_type = _GREATER
    return
end
# Upper bound; errors if an upper bound is already set by another set type.
function _add_constraint_on_variable!(
    form::Formulation, varinfo::_VarInfo, set::MOI.LessThan{Float64}
)
    _throw_if_existing_upper(varinfo.ub_type, MOI.LessThan{Float64}, varinfo.index)
    MathProg.setperenub!(form, varinfo.var, set.upper)
    varinfo.ub_type = _LESS
    return
end
# Fixing: sets both bounds; errors if either side is already bounded.
function _add_constraint_on_variable!(
    form::Formulation, varinfo::_VarInfo, set::MOI.EqualTo{Float64}
)
    _throw_if_existing_lower(varinfo.lb_type, MOI.EqualTo{Float64}, varinfo.index)
    _throw_if_existing_upper(varinfo.ub_type, MOI.EqualTo{Float64}, varinfo.index)
    MathProg.setperenlb!(form, varinfo.var, set.value)
    MathProg.setperenub!(form, varinfo.var, set.value)
    varinfo.lb_type = _EQUAL
    varinfo.ub_type = _EQUAL
    return
end
# Interval: sets both bounds; errors if either side is already bounded.
function _add_constraint_on_variable!(
    form::Formulation, varinfo::_VarInfo, set::MOI.Interval{Float64}
)
    _throw_if_existing_lower(varinfo.lb_type, MOI.Interval{Float64}, varinfo.index)
    _throw_if_existing_upper(varinfo.ub_type, MOI.Interval{Float64}, varinfo.index)
    MathProg.setperenlb!(form, varinfo.var, set.lower)
    MathProg.setperenub!(form, varinfo.var, set.upper)
    varinfo.lb_type = _INTERVAL
    varinfo.ub_type = _INTERVAL
    return
end
# Attach a bound/kind constraint to a single variable. Per MOI convention,
# the constraint index reuses the variable index's value.
function MOI.add_constraint(
    model::Optimizer, func::MOI.VariableIndex, set::S
) where {S<:SupportedVarSets}
    form = get_original_formulation(model.inner)
    _add_constraint_on_variable!(form, _info(model, func), set)
    return MOI.ConstraintIndex{MOI.VariableIndex, S}(func.value)
end
# Add a scalar-affine constraint: accumulate the (possibly duplicated) terms
# into a coefficient dict, create the Coluna constraint, and register its info.
function MOI.add_constraint(
    model::Optimizer, func::F, set::S
) where {F<:MOI.ScalarAffineFunction{Float64}, S<:SupportedConstrSets}
    # MOI forbids a constant term in a constraint function.
    if !iszero(func.constant)
        throw(MOI.ScalarFunctionConstantNotZero{Float64,F,S}(func.constant))
    end
    orig_form = get_original_formulation(model.inner)
    # MOI allows duplicate terms on the same variable: sum their coefficients.
    members = Dict{VarId, Float64}()
    for term in func.terms
        var = _info(model, term.variable).var
        members[getid(var)] = get(members, getid(var), 0.0) + term.coefficient
    end
    constr = setconstr!(
        orig_form, "", OriginalConstr;
        rhs = MathProg.convert_moi_rhs_to_coluna(set),
        kind = Essential,
        sense = MathProg.convert_moi_sense_to_coluna(set),
        inc_val = 10.0,
        members = members
    )
    constrinfo = _ConstrInfo(constr)
    # `add_item` already stores `constrinfo` under `constr_index`; the previous
    # version re-inserted the same entry redundantly right after.
    constr_index = CleverDicts.add_item(model.constrinfos, constrinfo)
    index = MOI.ConstraintIndex{MOI.ScalarAffineFunction{Float64}, S}(constr_index)
    constrinfo.index = index
    return index
end
############################################################################################
# Delete and modify variable
############################################################################################
# Undo a `VariableIndex`-in-set constraint: restore the corresponding bound(s)
# to +/-Inf or the kind to continuous, and reset the info state machine.

function _delete_constraint_on_variable!(
    form::Formulation, varinfo::_VarInfo, ::Type{<:MOI.Integer}
)
    varinfo.kind = _CONT
    setperenkind!(form, varinfo.var, Continuous)
    return
end
function _delete_constraint_on_variable!(
    form::Formulation, varinfo::_VarInfo, ::Type{<:MOI.ZeroOne}
)
    varinfo.kind = _CONT
    setperenkind!(form, varinfo.var, Continuous)
    return
end
function _delete_constraint_on_variable!(
    form::Formulation, varinfo::_VarInfo, ::Type{<:MOI.GreaterThan{Float64}}
)
    varinfo.lb_type = _NONE
    MathProg.setperenlb!(form, varinfo.var, -Inf)
    return
end
function _delete_constraint_on_variable!(
    form::Formulation, varinfo::_VarInfo, ::Type{<:MOI.LessThan{Float64}}
)
    varinfo.ub_type = _NONE
    MathProg.setperenub!(form, varinfo.var, Inf)
    return
end
# `EqualTo` and `Interval` own both bounds, so both sides are released.
function _delete_constraint_on_variable!(
    form::Formulation, varinfo::_VarInfo, ::Type{<:MOI.EqualTo{Float64}}
)
    varinfo.lb_type = _NONE
    varinfo.ub_type = _NONE
    MathProg.setperenlb!(form, varinfo.var, -Inf)
    MathProg.setperenub!(form, varinfo.var, Inf)
    return
end
function _delete_constraint_on_variable!(
    form::Formulation, varinfo::_VarInfo, ::Type{<:MOI.Interval{Float64}}
)
    varinfo.lb_type = _NONE
    varinfo.ub_type = _NONE
    MathProg.setperenlb!(form, varinfo.var, -Inf)
    MathProg.setperenub!(form, varinfo.var, Inf)
    return
end
# Delete a variable: first zero its objective coefficient and its coefficient
# in every constraint, then remove it from the formulation and from every map.
function MOI.delete(model::Optimizer, vi::MOI.VariableIndex)
    MOI.throw_if_not_valid(model, vi)
    MOI.modify(model, MoiObjective(), MOI.ScalarCoefficientChange(vi, 0.0))
    for (_, constrinfo) in model.constrinfos
        MOI.modify(model, constrinfo.index, MOI.ScalarCoefficientChange(vi, 0.0))
    end
    varid = getid(_info(model, vi).var)
    delete!(get_original_formulation(model.inner), varid)
    delete!(model.moi_varids, varid)
    delete!(model.varinfos, vi)
    delete!(model.env.varids, vi)
    return
end
# Change a variable's objective coefficient. Note: the new coefficient
# *replaces* the variable's cost (it is not added to it).
function MOI.modify(
    model::Optimizer, ::MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}},
    change::MathOptInterface.ScalarCoefficientChange{Float64}
)
    setperencost!(
        get_original_formulation(model.inner), _info(model, change.variable).var, change.new_coefficient
    )
    model.is_objective_set = true
    return
end
############################################################################################
# Delete and modify constraint
############################################################################################
# Delete a `VariableIndex`-in-set constraint: only the bound/kind is undone,
# the variable itself stays.
function MOI.delete(
    model::Optimizer, ci::MOI.ConstraintIndex{F,S}
) where {F<:MOI.VariableIndex,S}
    MOI.throw_if_not_valid(model, ci)
    origform = get_original_formulation(model.inner)
    varinfo = _info(model, ci)
    _delete_constraint_on_variable!(origform, varinfo, S)
    return
end

# Delete a scalar-affine constraint: zero its row in the coefficient matrix
# before removing the constraint and its info record.
function MOI.delete(
    model::Optimizer, ci::MOI.ConstraintIndex{F,S}
) where {F<:MOI.ScalarAffineFunction{Float64},S}
    MOI.throw_if_not_valid(model, ci)
    constrid = getid(_info(model, ci).constr)
    orig_form = get_original_formulation(model.inner)
    coefmatrix = getcoefmatrix(orig_form)
    # Collect first: mutating the matrix while iterating its row is unsafe.
    varids = VarId[]
    for (varid, _) in @view coefmatrix[constrid, :]
        push!(varids, varid)
    end
    for varid in varids
        coefmatrix[constrid, varid] = 0.0
    end
    delete!(orig_form, constrid)
    delete!(model.constrinfos, ci.value)
    return
end
# Change the right-hand side of a scalar-affine constraint.
function MOI.modify(
    model::Optimizer, ci::MOI.ConstraintIndex{F,S},
    change::MOI.ScalarConstantChange{Float64}
) where {F<:MOI.ScalarAffineFunction{Float64},S}
    MOI.throw_if_not_valid(model, ci)
    form = get_original_formulation(model.inner)
    setperenrhs!(form, _info(model, ci).constr, change.new_constant)
    return
end

# Change one coefficient of a scalar-affine constraint.
function MOI.modify(
    model::Optimizer, ci::MOI.ConstraintIndex{F,S},
    change::MOI.ScalarCoefficientChange{Float64}
) where {F<:MOI.ScalarAffineFunction{Float64},S}
    MOI.throw_if_not_valid(model, ci)
    form = get_original_formulation(model.inner)
    row = getid(_info(model, ci).constr)
    col = getid(_info(model, change.variable).var)
    getcoefmatrix(form)[row, col] = change.new_coefficient
    return
end
############################################################################################
# Get variables
############################################################################################
# All variable indices, sorted by their integer value as MOI requires.
function MOI.get(model::Optimizer, ::MOI.ListOfVariableIndices)
    indices = collect(values(model.moi_varids))
    return sort!(indices, by = vi -> vi.value)
end
############################################################################################
# Get constraints
############################################################################################
# Reconstruct the set of (function, set) pairs present in the model from the
# per-variable bound/kind states and the per-constraint senses.
function MOI.get(model::Optimizer, ::MOI.ListOfConstraintTypesPresent)
    orig_form = get_original_formulation(model.inner)
    constraints = Set{Tuple{DataType, DataType}}()
    for (_, varinfo) in model.varinfos
        # Bounds
        lb_type = varinfo.lb_type
        ub_type = varinfo.ub_type
        if lb_type == _GREATER
            push!(constraints, (MOI.VariableIndex, MOI.GreaterThan{Float64}))
        end
        if ub_type == _LESS
            push!(constraints, (MOI.VariableIndex, MOI.LessThan{Float64}))
        end
        # Interval and EqualTo own both sides, so both states must agree.
        if ub_type == _INTERVAL && lb_type == _INTERVAL
            push!(constraints, (MOI.VariableIndex, MOI.Interval{Float64}))
        end
        if ub_type == _EQUAL && lb_type == _EQUAL
            push!(constraints, (MOI.VariableIndex, MOI.EqualTo{Float64}))
        end
        # Kind
        kind = varinfo.kind
        if kind == _INT
            push!(constraints, (MOI.VariableIndex, MOI.Integer))
        end
        if kind == _BINARY
            push!(constraints, (MOI.VariableIndex, MOI.ZeroOne))
        end
    end
    for (_, constrinfo) in model.constrinfos
        constr = constrinfo.constr
        constr_sense = MathProg.convert_coluna_sense_to_moi(getperensense(orig_form, constr))
        push!(constraints, (MOI.ScalarAffineFunction{Float64}, constr_sense))
    end
    return collect(constraints)
end
# Dispatch helper: push the index only when its {F,S} matches the target
# vector's element type; the mismatching method is a silent no-op.
function _add_constraint!(
    indices::Vector{MOI.ConstraintIndex{F,S}}, index::MOI.ConstraintIndex{F,S}
) where {F,S}
    push!(indices, index)
    return
end
function _add_constraint!(
    ::Vector{MOI.ConstraintIndex{F,S}}, index::MOI.ConstraintIndex
) where {F,S}
    return
end

# All scalar-affine constraint indices of set type `S`, sorted by value.
function MOI.get(
    model::Optimizer, ::MOI.ListOfConstraintIndices{F, S}
) where {F<:MOI.ScalarAffineFunction{Float64}, S}
    indices = MOI.ConstraintIndex{F,S}[]
    for (_, constrinfo) in model.constrinfos
        _add_constraint!(indices, constrinfo.index)
    end
    return sort!(indices, by = x -> x.value)
end
# Map an MOI set type to the internal bound/kind enum (or `nothing` when the
# set does not encode a bound / kind respectively).
_bound_enum(::Type{<:MOI.LessThan}) = _LESS
_bound_enum(::Type{<:MOI.GreaterThan}) = _GREATER
_bound_enum(::Type{<:MOI.Interval}) = _INTERVAL
_bound_enum(::Type{<:MOI.EqualTo}) = _EQUAL
_bound_enum(::Any) = nothing
_kind_enum(::Type{<:MOI.ZeroOne}) = _BINARY
_kind_enum(::Type{<:MOI.Integer}) = _INT
_kind_enum(::Any) = nothing

# All `VariableIndex`-in-`S` constraint indices: a variable has such a
# constraint when its recorded bound or kind state matches `S`.
function MOI.get(
    model::Optimizer, ::MOI.ListOfConstraintIndices{F, S}
) where {F<:MOI.VariableIndex, S}
    indices = MOI.ConstraintIndex{F,S}[]
    for (_, varinfo) in model.varinfos
        if varinfo.lb_type == _bound_enum(S) || varinfo.ub_type == _bound_enum(S) || varinfo.kind == _kind_enum(S)
            push!(indices, MOI.ConstraintIndex{MOI.VariableIndex, S}(varinfo.index.value))
        end
    end
    return sort!(indices, by = x -> x.value)
end
# Rebuild the scalar-affine function of a constraint from the coefficient
# matrix row (constant is always 0 since MOI forbids constants in constraints).
function MOI.get(
    model::Optimizer, ::MOI.ConstraintFunction, index::MOI.ConstraintIndex{F,S}
) where {F<:MOI.ScalarAffineFunction{Float64}, S}
    MOI.throw_if_not_valid(model, index)
    orig_form = get_original_formulation(model.inner)
    constrid = getid(_info(model, index).constr)
    terms = MOI.ScalarAffineTerm{Float64}[]
    # Cannot get a view of the coefficient matrix when it is in fill mode.
    matrix = getcoefmatrix(orig_form)
    if matrix.matrix.fillmode
        # In fill mode pending coefficients live in the buffer.
        for (varid, coef) in view(matrix.matrix.buffer, constrid, :)
            push!(terms, MOI.ScalarAffineTerm(coef, model.moi_varids[varid]))
        end
    else
        for (varid, coef) in @view matrix[constrid, :]
            push!(terms, MOI.ScalarAffineTerm(coef, model.moi_varids[varid]))
        end
    end
    return MOI.ScalarAffineFunction(terms, 0.0)
end

# The function of a variable constraint is the variable itself (same value).
function MOI.get(
    model::Optimizer, ::MOI.ConstraintFunction, index::MOI.ConstraintIndex{F,S}
) where {F<:MOI.VariableIndex, S}
    MOI.throw_if_not_valid(model, index)
    return MOI.VariableIndex(index.value)
end
# Rebuild the MOI set of a scalar-affine constraint from its stored rhs.
function MOI.get(
    model::Optimizer, ::MOI.ConstraintSet, index::MOI.ConstraintIndex{F,S}
) where {F<:MOI.ScalarAffineFunction{Float64},S}
    MOI.throw_if_not_valid(model, index)
    form = get_original_formulation(model.inner)
    return S(getperenrhs(form, _info(model, index).constr))
end

# Bound sets are rebuilt from the variable's perennial bounds.
function MOI.get(
    model::Optimizer, ::MOI.ConstraintSet,
    index::MOI.ConstraintIndex{MOI.VariableIndex, MOI.GreaterThan{Float64}}
)
    MOI.throw_if_not_valid(model, index)
    form = get_original_formulation(model.inner)
    var = _info(model, MOI.VariableIndex(index.value)).var
    return MOI.GreaterThan(getperenlb(form, var))
end

function MOI.get(
    model::Optimizer, ::MOI.ConstraintSet,
    index::MOI.ConstraintIndex{MOI.VariableIndex, MOI.LessThan{Float64}}
)
    MOI.throw_if_not_valid(model, index)
    form = get_original_formulation(model.inner)
    var = _info(model, MOI.VariableIndex(index.value)).var
    return MOI.LessThan(getperenub(form, var))
end

function MOI.get(
    model::Optimizer, ::MOI.ConstraintSet,
    index::MOI.ConstraintIndex{MOI.VariableIndex, MOI.EqualTo{Float64}}
)
    MOI.throw_if_not_valid(model, index)
    form = get_original_formulation(model.inner)
    var = _info(model, MOI.VariableIndex(index.value)).var
    lb = getperenlb(form, var)
    ub = getperenub(form, var)
    # An EqualTo constraint must have identical bounds by construction.
    @assert lb == ub
    return MOI.EqualTo(lb)
end

function MOI.get(
    model::Optimizer, ::MOI.ConstraintSet,
    index::MOI.ConstraintIndex{MOI.VariableIndex, MOI.Interval{Float64}}
)
    MOI.throw_if_not_valid(model, index)
    form = get_original_formulation(model.inner)
    var = _info(model, MOI.VariableIndex(index.value)).var
    return MOI.Interval(getperenlb(form, var), getperenub(form, var))
end

# Kind sets carry no data.
function MOI.get(
    ::Optimizer, ::MOI.ConstraintSet,
    ::MOI.ConstraintIndex{MOI.VariableIndex, MOI.ZeroOne}
)
    return MOI.ZeroOne()
end

function MOI.get(
    ::Optimizer, ::MOI.ConstraintSet,
    ::MOI.ConstraintIndex{MOI.VariableIndex, MOI.Integer}
)
    return MOI.Integer()
end
############################################################################################
# Set constraints
############################################################################################
# Replace the function of a scalar-affine constraint: zero out the current
# matrix row, then accumulate the new terms (duplicates are summed).
function MOI.set(
    model::Optimizer, ::MOI.ConstraintFunction, constrid::MOI.ConstraintIndex{F,S}, func::F
) where {F<:SupportedConstrFunc, S<:SupportedConstrSets}
    MOI.throw_if_not_valid(model, constrid)
    if !iszero(func.constant)
        throw(MOI.ScalarFunctionConstantNotZero(func.constant))
    end
    constrinfo = _info(model, constrid)
    id = getid(constrinfo.constr)
    origform = get_original_formulation(model.inner)
    coefmatrix = getcoefmatrix(origform)
    # Collect first: mutating the matrix while iterating its row is unsafe.
    varids = VarId[]
    for (varid, _) in @view coefmatrix[id, :]
        push!(varids, varid)
    end
    for varid in varids
        coefmatrix[id, varid] = 0.0
    end
    for term in func.terms
        var = _info(model, term.variable).var
        coefmatrix[id, getid(var)] += term.coefficient
    end
    return
end
# Replace the set of a scalar-affine constraint: update both rhs and sense.
function MOI.set(
    model::Optimizer, ::MOI.ConstraintSet, constrid::MOI.ConstraintIndex{F,S}, set::S
) where {F<:SupportedConstrFunc,S<:SupportedConstrSets}
    MOI.throw_if_not_valid(model, constrid)
    form = get_original_formulation(model.inner)
    constr = _info(model, constrid).constr
    setperenrhs!(form, constr, MathProg.convert_moi_rhs_to_coluna(set))
    setperensense!(form, constr, MathProg.convert_moi_sense_to_coluna(set))
    return
end

# MOI forbids replacing the function of a `VariableIndex` constraint.
function MOI.set(
    ::Optimizer, ::MOI.ConstraintFunction, ::MOI.ConstraintIndex{F,S}, ::S
) where {F<:MOI.VariableIndex,S}
    return throw(MOI.SettingVariableIndexNotAllowed())
end

# Replace the set of a variable constraint: translate it to (lb, ub) and
# store both perennial bounds.
function MOI.set(
    model::Optimizer, ::MOI.ConstraintSet, constrid::MOI.ConstraintIndex{F,S}, set::S
) where {F<:MOI.VariableIndex, S<:SupportedVarSets}
    MOI.throw_if_not_valid(model, constrid)
    lb, ub = MathProg.convert_moi_bounds_to_coluna(set)
    varinfo = _info(model, constrid)
    form = get_original_formulation(model.inner)
    MathProg.setperenlb!(form, varinfo.var, lb)
    MathProg.setperenub!(form, varinfo.var, ub)
    return
end
############################################################################################
# Names
############################################################################################
MOI.supports(::Optimizer, ::MOI.VariableName, ::Type{MOI.VariableIndex}) = true

function MOI.get(model::Optimizer, ::MOI.VariableName, varid::MOI.VariableIndex)
    MOI.throw_if_not_valid(model, varid)
    return _info(model, varid).name
end

# Rename a variable, maintaining the name -> (first index, count) map that
# lets duplicate names be detected lazily on lookup.
function MOI.set(
    model::Optimizer, ::MOI.VariableName, varid::MOI.VariableIndex, name::String
)
    MOI.throw_if_not_valid(model, varid)
    varinfo = _info(model, varid)
    oldname = varinfo.name
    varinfo.name = name
    varinfo.var.name = name
    # Decrement (or drop) the reference count of the old name.
    if !isempty(oldname)
        i, n = model.names_to_vars[oldname]
        if n <= 1
            delete!(model.names_to_vars, oldname)
        else
            model.names_to_vars[oldname] = (i, n-1)
        end
    end
    # Register the new name (empty names are never tracked).
    if !isempty(name)
        if !haskey(model.names_to_vars, name)
            model.names_to_vars[name] = (varid, 1)
        else
            i, n = model.names_to_vars[name]
            model.names_to_vars[name] = (i, n+1)
        end
    end
    return
end
function MOI.supports(::Optimizer, ::MOI.ConstraintName, ::Type{<:MOI.ConstraintIndex{MOI.ScalarAffineFunction{Float64}}})
    return true
end

function MOI.get(model::Optimizer, ::MOI.ConstraintName, constrid::MOI.ConstraintIndex{MOI.ScalarAffineFunction{Float64}, <:Any})
    MOI.throw_if_not_valid(model, constrid)
    return _info(model, constrid).name
end

# Rename a constraint, maintaining the name -> (first id, count) map used to
# detect duplicate names on lookup (same scheme as variable names).
function MOI.set(
    model::Optimizer, ::MOI.ConstraintName, constrid::MOI.ConstraintIndex{MOI.ScalarAffineFunction{Float64}, <:Any}, name::String
)
    MOI.throw_if_not_valid(model, constrid)
    constrinfo = _info(model, constrid)
    oldname = constrinfo.name
    constrinfo.name = name
    constrinfo.constr.name = name
    # Decrement (or drop) the reference count of the old name.
    if !isempty(oldname)
        i, n = model.names_to_constrs[oldname]
        if n <= 1
            delete!(model.names_to_constrs, oldname)
        else
            model.names_to_constrs[oldname] = (i, n-1)
        end
    end
    # Register the new name (empty names are never tracked).
    if !isempty(name)
        if !haskey(model.names_to_constrs, name)
            model.names_to_constrs[name] = (constrid.value, 1)
        else
            i, n = model.names_to_constrs[name]
            model.names_to_constrs[name] = (i, n+1)
        end
    end
    return
end
# Look a variable up by name; `nothing` when absent, error when ambiguous.
function MOI.get(model::Optimizer, ::Type{MOI.VariableIndex}, name::String)
    index, count = get(model.names_to_vars, name, (nothing, 0))
    count > 1 && error("Duplicate variable name detected: $(name).")
    return index
end

# Look a constraint up by name; `nothing` when absent, error when ambiguous.
function MOI.get(
    model::Optimizer, ::Type{MOI.ConstraintIndex}, name::String
)
    key, count = get(model.names_to_constrs, name, (nothing, 0))
    count > 1 && error("Duplicate constraint name detected: $(name).")
    key === nothing && return nothing
    return _info(model, key).index
end

# Typed lookup: return the index only when it has exactly the requested {F,S}.
function MOI.get(model::Optimizer, ::Type{MOI.ConstraintIndex{F,S}}, name::String) where {F,S}
    index = MOI.get(model, MOI.ConstraintIndex, name)
    if index isa MOI.ConstraintIndex{F,S}
        return index::MOI.ConstraintIndex{F,S}
    end
    return
end
############################################################################################
# Attributes of variables
############################################################################################
# Record the decomposition annotation of a variable.
function MOI.set(
    model::Optimizer, ::BD.VariableDecomposition, varid::MOI.VariableIndex,
    annotation::BD.Annotation
)
    store!(model.annotations, annotation, _info(model, varid).var)
    return
end

# In the case of a representative variable.
function MOI.set(
    model::Optimizer, ::BD.VariableDecomposition, varid::MOI.VariableIndex,
    annotations::Vector{<:BD.Annotation}
)
    store_repr!(model.annotations, annotations, _info(model, varid).var)
    return
end
# Set the branching priority of a variable, stored internally as a `Float64`.
# Accepting any `Real` subsumes the two previous duplicated methods for
# `Float64` and `Int` while staying backward compatible for their callers.
function MOI.set(
    model::Optimizer, ::BD.VarBranchingPriority, varid::MOI.VariableIndex, branching_priority::Real
)
    var = _info(model, varid).var
    var.branching_priority = Float64(branching_priority)
    return
end
# Attach user-defined custom data to a variable (used by pricing callbacks).
function MOI.set(
    model::Optimizer, ::BD.CustomVarValue, varid::MOI.VariableIndex, custom_data
)
    MOI.throw_if_not_valid(model, varid)
    _info(model, varid).var.custom_data = custom_data
    return
end

# Current branching priority of a variable.
function MOI.get(model::Optimizer, ::BD.VarBranchingPriority, varid::MOI.VariableIndex)
    return _info(model, varid).var.branching_priority
end

# Only the name attribute is tracked per variable.
function MOI.get(model::Optimizer, ::MOI.ListOfVariableAttributesSet)
    return MOI.AbstractVariableAttribute[MOI.VariableName()]
end
# TODO: we'll have to check if this implementation fits good pratices.
# The representative-variable attributes are intentionally no-ops: the actual
# storage happens in MOI.set(model, ::BD.VariableDecomposition, varid,
# ::Vector{<:BD.Annotation}) above.
function MOI.set(model::Optimizer, ::BD.RepresentativeVar, varid::MOI.VariableIndex, annotations)
    # nothing to do.
    # see MOI.set(model, ::BD.VariableDecomposition, varid, ::Vector{<:BD.Annotation})
    return
end

function MOI.get(model::Optimizer, ::BD.RepresentativeVar, varid::MOI.VariableIndex)
    # nothing to return.
    return
end

function MOI.set(model::Optimizer, ::BD.ListOfRepresentatives, list)
    # nothing to do.
    return
end

function MOI.get(model::Optimizer, ::BD.ListOfRepresentatives)
    # nothing to return
    return
end
############################################################################################
# Attributes of constraints
############################################################################################
# TODO move into BlockDecomposition.
# When an Interval constraint was bridged into two one-sided constraints,
# forward the decomposition annotation to both halves.
function MOI.set(
    model::MOI.ModelLike, attr::BlockDecomposition.ConstraintDecomposition,
    bridge::MOI.Bridges.Constraint.SplitIntervalBridge, value
)
    MOI.set(model.model, attr, bridge.lower, value)
    MOI.set(model.model, attr, bridge.upper, value)
    return
end

# Record the decomposition annotation of a constraint.
function MOI.set(
    model::Optimizer, ::BD.ConstraintDecomposition, constrid::MOI.ConstraintIndex,
    annotation::BD.Annotation
)
    MOI.throw_if_not_valid(model, constrid)
    store!(model.annotations, annotation, _info(model, constrid).constr)
    return
end

# Attach user-defined custom data to a constraint (used by cut callbacks).
function MOI.set(
    model::Optimizer, ::BlockDecomposition.CustomConstrValue, constrid::MOI.ConstraintIndex,
    custom_data
)
    MOI.throw_if_not_valid(model, constrid)
    constr = _info(model, constrid).constr
    constr.custom_data = custom_data
    return
end
# Decomposition annotation of a constraint (`nothing` when not annotated).
function MOI.get(model::Optimizer, ::BD.ConstraintDecomposition, index::MOI.ConstraintIndex)
    MOI.throw_if_not_valid(model, index)
    constr = _info(model, index).constr
    return get(model.annotations.ann_per_constr, getid(constr), nothing)
end

# Decomposition annotation of a variable (`nothing` when not annotated).
function MOI.get(model::Optimizer, ::BD.VariableDecomposition, index::MOI.VariableIndex)
    MOI.throw_if_not_valid(model, index)
    var = _info(model, index).var
    return get(model.annotations.ann_per_var, getid(var), nothing)
end

# Only the name attribute is tracked for affine constraints; variable
# constraints carry no attributes.
function MOI.get(model::Optimizer, ::MOI.ListOfConstraintAttributesSet)
    return MOI.AbstractConstraintAttribute[MOI.ConstraintName()]
end

function MOI.get(::Optimizer, ::MOI.ListOfConstraintAttributesSet{MOI.VariableIndex,<:MOI.AbstractScalarSet})
    return MOI.AbstractConstraintAttribute[]
end
############################################################################################
# Objective
############################################################################################
# Set the optimization sense. FEASIBILITY_SENSE is modeled as minimization of
# a zero objective, so every variable cost is cleared in that case.
function MOI.set(model::Optimizer, ::MOI.ObjectiveSense, sense::MOI.OptimizationSense)
    orig_form = get_original_formulation(model.inner)
    if sense == MOI.MIN_SENSE
        set_objective_sense!(orig_form, true) # Min
    elseif sense == MOI.MAX_SENSE
        set_objective_sense!(orig_form, false) # Max
    else
        set_objective_sense!(orig_form, true) # Min
        # Set the cost of all variables to 0
        for (_, varinfo) in model.varinfos
            setperencost!(orig_form, varinfo.var, 0.0)
        end
    end
    model.objective_sense = sense
    return
end
# Sense previously set by the user, FEASIBILITY_SENSE when never set.
function MOI.get(model::Optimizer, ::MOI.ObjectiveSense)
    sense = model.objective_sense
    return isnothing(sense) ? MOI.FEASIBILITY_SENSE : sense
end

# A `ZERO` objective is reported as scalar-affine, like MOI's default.
function MOI.get(model::Optimizer, ::MOI.ObjectiveFunctionType)
    return model.objective_type == SINGLE_VARIABLE ? MOI.VariableIndex :
           MOI.ScalarAffineFunction{Float64}
end
# Install a scalar-affine objective. MOI semantics: the new function replaces
# the previous objective entirely.
function MOI.set(
    model::Optimizer, ::MOI.ObjectiveFunction{F}, func::F
) where {F<:MOI.ScalarAffineFunction{Float64}}
    origform = get_original_formulation(model.inner)
    # Clear every cost so stale coefficients of a previous objective vanish.
    for (_, varinfo) in model.varinfos
        setperencost!(origform, varinfo.var, 0.0)
    end
    # Accumulate terms (MOI allows duplicate terms on the same variable).
    for term in func.terms
        var = _info(model, term.variable).var
        cost = term.coefficient + getperencost(origform, var)
        setperencost!(origform, var, cost)
    end
    # Always store the constant: the previous code only wrote it when nonzero,
    # which left a stale constant behind when the objective was replaced by
    # one with a zero constant.
    setobjconst!(origform, func.constant)
    model.objective_type = SCALAR_AFFINE
    model.is_objective_set = true
    return
end
# Install a single-variable objective. MOI semantics: the new function
# replaces the previous objective, so all costs are cleared first (the
# previous code left stale costs of a formerly-set affine objective behind,
# unlike the scalar-affine method above).
function MOI.set(
    model::Optimizer, ::MOI.ObjectiveFunction{MOI.VariableIndex},
    func::MOI.VariableIndex
)
    origform = get_original_formulation(model.inner)
    for (_, varinfo) in model.varinfos
        setperencost!(origform, varinfo.var, 0.0)
    end
    setperencost!(origform, _info(model, func).var, 1.0)
    model.objective_type = SINGLE_VARIABLE
    model.is_objective_set = true
    return
end
# Generic fallback: rebuild the affine objective and convert it to the
# requested function type `F`.
function MOI.get(model::Optimizer, ::MOI.ObjectiveFunction{F}) where {F}
    affine = MOI.get(model, MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}())
    return convert(F, affine)
end

# Rebuild the affine objective from the perennial costs, skipping zero terms.
function MOI.get(
    model::Optimizer, ::MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}
)
    form = get_original_formulation(model.inner)
    terms = MOI.ScalarAffineTerm{Float64}[]
    for (vi, varinfo) in model.varinfos
        cost = getperencost(form, varinfo.var)
        iszero(cost) && continue
        push!(terms, MOI.ScalarAffineTerm(cost, vi))
    end
    return MOI.ScalarAffineFunction(terms, getobjconst(form))
end
############################################################################################
# Attributes of model
############################################################################################
# Store the decomposition tree provided by BlockDecomposition.
function MOI.set(model::Optimizer, ::BD.DecompositionTree, tree::BD.Tree)
    model.annotations.tree = tree
    return
end

# Forward the user-provided initial dual bound to the inner problem.
function MOI.set(model::Optimizer, ::BD.ObjectiveDualBound, db)
    set_initial_dual_bound!(model.inner, db)
    return
end

# Forward the user-provided initial primal bound to the inner problem.
function MOI.set(model::Optimizer, ::BD.ObjectivePrimalBound, pb)
    set_initial_primal_bound!(model.inner, pb)
    return
end
# Register a custom var/constr data family once; ids are consecutive
# integers starting at 0 (insertion order).
function _customdata!(model::Optimizer, type::DataType)
    families = model.env.custom_families_id
    if !haskey(families, type)
        families[type] = length(families)
    end
    return
end
function MOI.set(
    model::Optimizer, ::BD.CustomVars, customvars
)
    # Register every custom variable family with the environment.
    foreach(family -> _customdata!(model, family), customvars)
    return
end
function MOI.set(
    model::Optimizer, ::BD.CustomConstrs, customconstrs
)
    # Register every custom constraint family with the environment.
    foreach(family -> _customdata!(model, family), customconstrs)
    return
end
# Lightweight handle on one master column of a primal solution, used by
# BlockDecomposition's column-inspection API.
mutable struct ColumnInfo <: BD.AbstractColumnInfo
    optimizer::Optimizer   # owning Coluna optimizer
    column_var_id::VarId   # id of the master column variable
    column_val::Float64    # value taken by the column in the solution
end
# Return one `ColumnInfo` per master column of the best integer-feasible
# primal (disaggregated) solution generated by the subproblem whose axis
# index value is `k`.
function BD.getsolutions(model::Optimizer, k)
    ip_primal_sol = get_best_ip_primal_sol(model.disagg_result)
    sp_columns_info = Vector{ColumnInfo}()
    for (varid, val) in ip_primal_sol
        # Keep only master columns...
        if getduty(varid) <= MasterCol
            # ...originating from the subproblem annotated with axis value `k`.
            if model.annotations.ann_per_form[getoriginformuid(varid)].axis_index_value == k
                push!(sp_columns_info, ColumnInfo(model, varid, val))
            end
        end
    end
    return sp_columns_info
end
# Value taken by the column in the primal solution.
BD.value(info::ColumnInfo) = info.column_val

# Custom data attached to the column variable in the master formulation.
function BD.customdata(info::ColumnInfo)
    masterform = getmaster(info.optimizer.inner.re_formulation)
    return getcustomdata(masterform, info.column_var_id)
end

# Value of original variable `index` inside the column, read from the
# primal solution pool of the pricing subproblem that generated it.
function BD.value(info::ColumnInfo, index::MOI.VariableIndex)
    varid = info.optimizer.env.varids[index]
    origin_form_uid = getoriginformuid(info.column_var_id)
    spform = get_dw_pricing_sps(info.optimizer.inner.re_formulation)[origin_form_uid]
    return get_primal_sol_pool(spform).solutions[info.column_var_id, varid]
end
function MOI.get(model::Optimizer, ::MOI.NumberOfVariables)
    # Count the variables of the original (non-reformulated) problem.
    return length(getvars(get_original_formulation(model.inner)))
end
function MOI.get(model::Optimizer, ::MOI.NumberOfConstraints{F, S}) where {F, S}
    # Delegate to the index list so both attributes always agree.
    indices = MOI.get(model, MOI.ListOfConstraintIndices{F, S}())
    return length(indices)
end
function MOI.get(model::Optimizer, ::MOI.ListOfModelAttributesSet)
    # Report every model-level attribute the user explicitly set.
    attrs = MOI.AbstractModelAttribute[]
    if model.is_objective_set
        F = MOI.get(model, MOI.ObjectiveFunctionType())
        push!(attrs, MOI.ObjectiveFunction{F}())
    end
    isnothing(model.objective_sense) || push!(attrs, MOI.ObjectiveSense())
    model.has_usercut_cb && push!(attrs, MOI.UserCutCallback())
    model.has_lazyconstraint_cb && push!(attrs, MOI.LazyConstraintCallback())
    model.has_pricing_cb && push!(attrs, BD.PricingCallback())
    model.has_initialcol_cb && push!(attrs, BD.InitialColumnsCallback())
    return attrs
end
############################################################################################
# is_valid methods
###########################################################################################
# Helpers for `MOI.is_valid` on variable-bound constraints: a constraint of
# set `S` over a single variable exists iff the variable's recorded bound
# type / kind flags match `S`.
_is_valid(::Type{<:MOI.LessThan{Float64}}, lb, ub, kind) = ub == _LESS
_is_valid(::Type{<:MOI.GreaterThan{Float64}}, lb, ub, kind) = lb == _GREATER
_is_valid(::Type{<:MOI.EqualTo{Float64}}, lb, ub, kind) = lb == ub == _EQUAL
_is_valid(::Type{<:MOI.Interval{Float64}}, lb, ub, kind) = lb == ub == _INTERVAL
_is_valid(::Type{<:MOI.ZeroOne}, lb, ub, kind) = kind == _BINARY
_is_valid(::Type{<:MOI.Integer}, lb, ub, kind) = kind == _INT
function MOI.is_valid(
    model::Optimizer, index::MOI.ConstraintIndex{F,S}
) where {F<:MOI.VariableIndex,S}
    # The constraint index mirrors the raw value of the variable it bounds.
    haskey(model.varinfos, MOI.VariableIndex(index.value)) || return false
    info = _info(model, index)
    return _is_valid(S, info.lb_type, info.ub_type, info.kind)
end
function MOI.is_valid(
    model::Optimizer, index::MOI.ConstraintIndex{F,S}
) where {F<:MOI.ScalarAffineFunction,S}
    # An affine constraint is valid iff its raw index value was registered.
    return index.value in keys(model.constrinfos)
end
# A variable index is valid iff it is registered in the model.
function MOI.is_valid(model::Optimizer, index::MOI.VariableIndex)
    return haskey(model.varinfos, index)
end
# ######################
# ### Get functions ####
# ######################
# Best dual bound proven on the integer problem.
function MOI.get(model::Optimizer, ::MOI.ObjectiveBound)
    return getvalue(get_ip_dual_bound(model.result))
end

# Objective value of the incumbent integer-feasible solution.
function MOI.get(model::Optimizer, attr::MOI.ObjectiveValue)
    MOI.check_result_index_bounds(model, attr)
    return getvalue(get_ip_primal_bound(model.result))
end

# Dual bound of the linear relaxation.
function MOI.get(model::Optimizer, attr::MOI.DualObjectiveValue)
    MOI.check_result_index_bounds(model, attr)
    return getvalue(get_lp_dual_bound(model.result))
end

# Relative gap between the integer primal and dual bounds.
function MOI.get(model::Optimizer, ::MOI.RelativeGap)
    return ip_gap(model.result)
end
function MOI.get(model::Optimizer, attr::MOI.VariablePrimal, ref::MOI.VariableIndex)
    MOI.check_result_index_bounds(model, attr)
    varid = getid(_info(model, ref).var) # This gets a coluna VarId
    sols = get_ip_primal_sols(model.result)
    1 <= attr.result_index <= length(sols) || return error("Invalid result index.")
    # Variables absent from the solution take value 0.
    return get(sols[attr.result_index], varid, 0.0)
end
function MOI.get(model::Optimizer, attr::MOI.VariablePrimal, refs::Vector{MOI.VariableIndex})
    MOI.check_result_index_bounds(model, attr)
    best = get_best_ip_primal_sol(model.result)
    if isnothing(best)
        @warn "Coluna did not find a primal feasible solution."
        return fill(NaN, length(refs))
    end
    # Variables absent from the solution take value 0.
    return [get(best, getid(model.varinfos[ref].var), 0.0) for ref in refs]
end
# Translate Coluna's termination status into the corresponding MOI status.
function MOI.get(model::Optimizer, ::MOI.TerminationStatus)
    return convert_status(getterminationstatus(model.result))
end
function MOI.get(model::Optimizer, attr::MOI.PrimalStatus)
    # Only the first result carries a primal status.
    attr.result_index == 1 || return MOI.NO_SOLUTION
    sol = get_best_ip_primal_sol(model.result)
    return isnothing(sol) ? MOI.NO_SOLUTION : convert_status(getstatus(sol))
end
function MOI.get(model::Optimizer, attr::MOI.DualStatus)
    # Only the first result carries a dual status.
    attr.result_index == 1 || return MOI.NO_SOLUTION
    sol = get_best_lp_dual_sol(model.result)
    return isnothing(sol) ? MOI.NO_SOLUTION : convert_status(getstatus(sol))
end
# Human-readable solver status (the raw Coluna termination status).
function MOI.get(model::Optimizer, ::MOI.RawStatusString)
    return string(getterminationstatus(model.result))
end

# Number of integer-feasible primal solutions available to query.
function MOI.get(model::Optimizer, ::MOI.ResultCount)
    return length(get_ip_primal_sols(model.result))
end
# Primal value of a variable-bound constraint = primal value of the variable.
function MOI.get(
    model::Optimizer, attr::MOI.ConstraintPrimal, index::MOI.ConstraintIndex{F,S}
) where {F<:MOI.VariableIndex,S}
    # TODO: throw if optimization in progress.
    MOI.check_result_index_bounds(model, attr)
    return MOI.get(model, MOI.VariablePrimal(), MOI.VariableIndex(index.value))
end

# Primal activity (row value) of a constraint in the best primal solution.
function MOI.get(model::Optimizer, attr::MOI.ConstraintPrimal, index::MOI.ConstraintIndex)
    # TODO: throw if optimization in progress.
    MOI.check_result_index_bounds(model, attr)
    constr = _info(model, index).constr
    # NOTE(review): `attr.result_index` is bounds-checked above but the best
    # solution is always used — confirm intended for result_index > 1.
    best_primal_sol = get_best_ip_primal_sol(model.result)
    return constraint_primal(best_primal_sol, getid(constr))
end
# Number of branch-and-bound nodes explored during the last solve.
MOI.get(model::Optimizer, ::MOI.NodeCount) = model.env.kpis.node_count
# Wall-clock time (seconds) spent in the last optimization.
MOI.get(model::Optimizer, ::MOI.SolveTimeSec) = model.env.kpis.elapsed_optimization_time
function MOI.get(
    model::Optimizer, attr::MOI.ConstraintDual,
    index::MOI.ConstraintIndex{MOI.ScalarAffineFunction{Float64}}
)
    MOI.throw_if_not_valid(model, index)
    duals = get_lp_dual_sols(model.result)
    1 <= attr.result_index <= length(duals) || return error("Invalid result index.")
    # Flip the sign for maximization so duals follow the MOI convention.
    sign = model.objective_sense == MOI.MAX_SENSE ? -1.0 : 1.0
    return sign * get(duals[attr.result_index], getid(_info(model, index).constr), 0.0)
end
# Dual value of a variable-bound constraint, derived from the variable's
# reduced cost. The reduced cost belongs to the bound that is active in the
# dual solution; a nonzero reduced cost attached to the *other* bound
# contributes 0 to this constraint.
function _singlevarconstrdualval(dualsol, var, ::Type{<:MOI.GreaterThan})
    value, activebound = get(get_var_redcosts(dualsol), getid(var), (0.0, MathProg.LOWER))
    # GreaterThan corresponds to the variable's lower bound.
    if !iszero(value) && activebound != MathProg.LOWER
        return 0.0
    end
    return value
end

function _singlevarconstrdualval(dualsol, var, ::Type{<:MOI.LessThan})
    value, activebound = get(get_var_redcosts(dualsol), getid(var), (0.0, MathProg.UPPER))
    # LessThan corresponds to the variable's upper bound.
    if !iszero(value) && activebound != MathProg.UPPER
        return 0.0
    end
    return value
end

# For EqualTo / Interval both bounds belong to the same constraint, so the
# reduced cost is returned regardless of which bound is active.
function _singlevarconstrdualval(dualsol, var, ::Type{<:MOI.EqualTo})
    value, _ = get(get_var_redcosts(dualsol), getid(var), (0.0, MathProg.LOWER))
    return value
end

function _singlevarconstrdualval(dualsol, var, ::Type{<:MOI.Interval})
    value, _ = get(get_var_redcosts(dualsol), getid(var), (0.0, MathProg.LOWER))
    return value
end
# Dual value of a variable-bound constraint (sign flipped for maximization
# so values follow the MOI convention).
function MOI.get(
    model::Optimizer, attr::MOI.ConstraintDual, index::MOI.ConstraintIndex{F,S}
) where {F<:MOI.VariableIndex,S}
    # TODO: check if optimization in progress.
    MOI.check_result_index_bounds(model, attr)
    dualsols = get_lp_dual_sols(model.result)
    sense = model.objective_sense == MOI.MAX_SENSE ? -1.0 : 1.0
    if 1 <= attr.result_index <= length(dualsols)
        dualsol = dualsols[attr.result_index]
        varinfo = _info(model, MOI.VariableIndex(index.value))
        return sense * _singlevarconstrdualval(dualsol, varinfo.var, S)
    end
    error("Invalid result index.")
end
# Useful method to retrieve dual values of generated cuts because they don't
# have MOI.ConstraintIndex
function MOI.get(
    model::Optimizer, attr::MOI.ConstraintDual, constrid::ConstrId
)
    # TODO: check if optimization in progress.
    MOI.check_result_index_bounds(model, attr)
    duals = get_lp_dual_sols(model.result)
    1 <= attr.result_index <= length(duals) || return error("Invalid result index.")
    # Flip the sign for maximization so duals follow the MOI convention.
    sign = model.objective_sense == MOI.MAX_SENSE ? -1.0 : 1.0
    return sign * get(duals[attr.result_index], constrid, 0.0)
end
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 2627 | ## Annotations
# Aggregates the BlockDecomposition annotations attached to the variables,
# constraints, and formulations of a problem, indexed in both directions.
mutable struct Annotations
    tree::Union{BD.Tree, Nothing}                        # decomposition tree (nothing = no decomposition)
    ann_per_var::Dict{VarId, BD.Annotation}              # annotation of each variable
    ann_per_repr_var::Dict{VarId, Vector{BD.Annotation}} # annotations of representative variables
    ann_per_constr::Dict{ConstrId, BD.Annotation}        # annotation of each constraint
    vars_per_ann::Dict{BD.Annotation, Dict{VarId,Variable}}         # reverse index: annotation -> variables
    constrs_per_ann::Dict{BD.Annotation, Dict{ConstrId,Constraint}} # reverse index: annotation -> constraints
    ann_per_form::Dict{Int, BD.Annotation}               # annotation of each formulation (keyed by uid)
    annotation_set::Set{BD.Annotation}                   # all annotations seen so far
end
# Empty annotation store: no tree and no registered annotation.
function Annotations()
    return Annotations(
        nothing,
        Dict{VarId, BD.Annotation}(),
        Dict{VarId, Vector{BD.Annotation}}(),
        Dict{ConstrId, BD.Annotation}(),
        Dict{BD.Annotation, Dict{VarId,Variable}}(),
        Dict{BD.Annotation, Dict{ConstrId,Constraint}}(),
        Dict{Int, BD.Annotation}(),
        Set{BD.Annotation}(),
    )
end
# Record the annotation of a variable and index the variable by annotation.
function store!(annotations::Annotations, ann::BD.Annotation, var::Variable)
    varid = getid(var)
    push!(annotations.annotation_set, ann)
    annotations.ann_per_var[varid] = ann
    bucket = get!(annotations.vars_per_ann, ann) do
        Dict{VarId, Variable}()
    end
    bucket[varid] = var
    return
end
# Record the annotations of a representative variable (a variable shared by
# several subproblems) and index it under each of its annotations.
function store_repr!(annotations::Annotations, ann::Vector{<:BD.Annotation}, var::Variable)
    varid = getid(var)
    push!(annotations.annotation_set, ann...)
    annotations.ann_per_repr_var[varid] = ann
    for annotation in ann
        bucket = get!(annotations.vars_per_ann, annotation) do
            Dict{VarId, Variable}()
        end
        bucket[varid] = var
    end
    return
end
# Record the annotation of a constraint and index the constraint by annotation.
function store!(annotations::Annotations, ann::BD.Annotation, constr::Constraint)
    constrid = getid(constr)
    push!(annotations.annotation_set, ann)
    annotations.ann_per_constr[constrid] = ann
    bucket = get!(annotations.constrs_per_ann, ann) do
        Dict{ConstrId, Constraint}()
    end
    bucket[constrid] = constr
    return
end
# Attach an annotation to a formulation; each formulation may be annotated once.
function store!(annotations::Annotations, form::AbstractFormulation, ann::BD.Annotation)
    form_uid = getuid(form)
    haskey(annotations.ann_per_form, form_uid) &&
        error("Formulation with uid $form_uid already has annotation.")
    annotations.ann_per_form[form_uid] = ann
    return
end
# Retrieve the annotation attached to a formulation; errors if absent.
function Base.get(annotations::Annotations, form::AbstractFormulation)
    form_uid = getuid(form)
    haskey(annotations.ann_per_form, form_uid) ||
        error("Formulation with uid $form_uid does not have any annotation.")
    return annotations.ann_per_form[form_uid]
end
# `true` when the variable was registered as representative (i.e. it
# belongs to several subproblems).
is_representative(annotations::Annotations, varid::VarId) =
    haskey(annotations.ann_per_repr_var, varid)
|
[
"MPL-2.0"
# Create one global artificial variable (positive or negative slack) in the
# master; its cost sign is flipped for maximization problems.
function set_glob_art_var(form::Formulation, is_pos::Bool, env::Env)
    name = string("global_", (is_pos ? "pos" : "neg"), "_art_var")
    sign = getobjsense(form) == MinSense ? 1.0 : -1.0
    return setvar!(
        form, name, MasterArtVar;
        cost=sign * env.params.global_art_var_cost, lb=0.0, ub=Inf, kind=Continuous
    )
end
# Add the two global artificial variables to the master and insert them in
# every active original master constraint so the master LP stays feasible:
# the positive one in >= rows, the negative one in <= rows, both in == rows.
function create_global_art_vars!(masterform::Formulation, env::Env)
    global_pos = set_glob_art_var(masterform, true, env)
    global_neg = set_glob_art_var(masterform, false, env)
    matrix = getcoefmatrix(masterform)
    for (constrid, constr) in getconstrs(masterform)
        iscuractive(masterform, constrid) || continue
        getduty(constrid) <= AbstractMasterOriginConstr || continue
        if getcursense(masterform, constr) == Greater
            matrix[constrid, getid(global_pos)] = 1.0
        elseif getcursense(masterform, constr) == Less
            matrix[constrid, getid(global_neg)] = -1.0
        else # Equal
            matrix[constrid, getid(global_pos)] = 1.0
            matrix[constrid, getid(global_neg)] = -1.0
        end
    end
end
# Create the (empty) Dantzig-Wolfe master formulation; it keeps the
# objective sense and constant of the original formulation.
function instantiate_master!(
    env::Env, origform::Formulation{Original}, ::Type{BD.Master}, ::Type{BD.DantzigWolfe}
)
    form = create_formulation!(
        env,
        MathProg.DwMaster();
        obj_sense=getobjsense(origform)
    )
    setobjconst!(form, getobjconst(origform))
    return form
end

# Create the (empty) Benders master formulation.
function instantiate_master!(
    env::Env, origform::Formulation{Original}, ::Type{BD.Master}, ::Type{BD.Benders}
)
    return create_formulation!(
        env,
        MathProg.BendersMaster();
        obj_sense=getobjsense(origform)
    )
end

# Create an (empty) Dantzig-Wolfe pricing subproblem attached to the master.
function instantiate_sp!(
    env::Env, master::Formulation{DwMaster}, ::Type{BD.DwPricingSp}, ::Type{BD.DantzigWolfe}
)
    return create_formulation!(
        env,
        MathProg.DwSp(nothing, nothing, nothing, Integ);
        parent_formulation=master,
        obj_sense=getobjsense(master)
    )
end

# Create an (empty) Benders separation subproblem attached to the master.
function instantiate_sp!(
    env::Env, master::Formulation{BendersMaster}, ::Type{BD.BendersSepSp}, ::Type{BD.Benders}
)
    return create_formulation!(
        env,
        MathProg.BendersSp();
        parent_formulation=master,
        obj_sense=getobjsense(master)
    )
end
# Master of Dantzig-Wolfe decomposition
# We clone the variables and the single variable constraints at the same time
# Clone into the DW master every original variable annotated with a Master
# formulation (pure master variables).
function instantiate_orig_vars!(
    masterform::Formulation{DwMaster},
    origform::Formulation,
    annotations::Annotations,
    mast_ann
)
    for (ann, vars) in annotations.vars_per_ann
        BD.getformulation(ann) <: BD.Master || continue
        for (_, var) in vars
            clonevar!(origform, masterform, masterform, var, MasterPureVar, is_explicit=true)
        end
    end
    return
end
# Clone into the DW master the original constraints annotated with the
# master annotation, and register the robust cut generation callbacks.
function instantiate_orig_constrs!(
    masterform::Formulation{DwMaster},
    origform::Formulation{Original},
    env::Env,
    annotations::Annotations,
    mast_ann
)
    !haskey(annotations.constrs_per_ann, mast_ann) && return
    constrs = annotations.constrs_per_ann[mast_ann]
    for (_, constr) in constrs
        # Each cloned row gets local artificial variables with the configured cost.
        cloneconstr!(
            origform, masterform, masterform, constr, MasterMixedConstr,
            loc_art_var_abs_cost=env.params.local_art_var_cost
        ) # TODO distinguish Pure versus Mixed
    end
    # Cut generation callbacks
    for constrgen in get_robust_constr_generators(origform)
        set_robust_constr_generator!(masterform, constrgen.kind, constrgen.separation_alg)
    end
    return
end
# Create the DW master side elements: for each pricing subproblem, a
# representative of its setup variable and the two convexity constraints
# (lower/upper multiplicity), whose ids are stored in the subproblem's duty
# data together with the column kind.
function create_side_vars_constrs!(
    masterform::Formulation{DwMaster},
    origform::Formulation{Original},
    env::Env,
    annotations::Annotations
)
    coefmatrix = getcoefmatrix(masterform)
    for (spuid, spform) in get_dw_pricing_sps(masterform.parent_formulation)
        ann = get(annotations, spform)
        # Each subproblem owns exactly one setup variable.
        setupvars = filter(v -> getduty(v.first) == DwSpSetupVar, getvars(spform))
        @assert length(setupvars) == 1
        setupvar = collect(values(setupvars))[1]
        setuprepvar = clonevar!(origform, masterform, spform, setupvar, MasterRepPricingSetupVar, is_explicit=false)
        # create convexity constraint & storing information about the convexity constraint
        # in the duty data of the formulation
        lb_mult = Float64(BD.getlowermultiplicity(ann))
        name = string("sp_lb_", spuid)
        lb_conv_constr = setconstr!(
            masterform, name, MasterConvexityConstr;
            rhs=lb_mult, kind=Essential, sense=Greater, inc_val=100.0,
            loc_art_var_abs_cost=env.params.local_art_var_cost
        )
        coefmatrix[getid(lb_conv_constr), getid(setuprepvar)] = 1.0
        ub_mult = Float64(BD.getuppermultiplicity(ann))
        name = string("sp_ub_", spuid)
        ub_conv_constr = setconstr!(
            masterform, name, MasterConvexityConstr; rhs=ub_mult,
            kind=Essential, sense=Less, inc_val=100.0,
            loc_art_var_abs_cost=env.params.local_art_var_cost
        )
        coefmatrix[getid(ub_conv_constr), getid(setuprepvar)] = 1.0
        spform.duty_data.lower_multiplicity_constr_id = getid(lb_conv_constr)
        spform.duty_data.upper_multiplicity_constr_id = getid(ub_conv_constr)
        spform.duty_data.setup_var = getid(setupvar)
        spform.duty_data.branching_priority = BD.getbranchingpriority(ann)
        # If pricing subproblem variables are continuous, the master columns generated by
        # the subproblem must have a continuous perenkind.
        # This piece of information is stored in the duty data of the formulation.
        continuous_columns = true
        for (varid, var) in getvars(spform)
            if getduty(varid) <= DwSpPricingVar && getperenkind(spform, var) !== Continuous
                continuous_columns = false
                break
            end
        end
        spform.duty_data.column_var_kind = continuous_columns ? Continuous : Integ
    end
    return
end
# The DW master receives the two global artificial variables.
function create_artificial_vars!(masterform::Formulation{DwMaster}, env::Env)
    create_global_art_vars!(masterform, env)
    return
end
# Pricing subproblem of Danztig-Wolfe decomposition
# Clone into a DW pricing subproblem the original variables annotated with
# `sp_ann`, and create their implicit representatives in the master with
# bounds scaled by the subproblem multiplicities.
function instantiate_orig_vars!(
    spform::Formulation{DwSp},
    origform::Formulation{Original},
    annotations::Annotations,
    sp_ann
)
    !haskey(annotations.vars_per_ann, sp_ann) && return
    vars = annotations.vars_per_ann[sp_ann]
    masterform = spform.parent_formulation
    for (varid, var) in vars
        # An original variable annotated in a subproblem is a DwSpPricingVar
        clonevar!(origform, spform, spform, var, DwSpPricingVar, is_explicit=true)
        if haskey(masterform, varid) && !is_representative(annotations, varid)
            error("""
            Variable $(getname(masterform, varid)) is in two subproblems but is not representative.
            Please open an issue.
            """)
        end
        if !haskey(masterform, varid)
            # Multiplicities: summed over all annotations for a representative
            # variable, taken from this subproblem's annotation otherwise.
            mult_lb, mult_ub = if is_representative(annotations, varid)
                repr_anns = annotations.ann_per_repr_var[varid]
                (
                    sum(BD.getlowermultiplicity.(repr_anns)),
                    sum(BD.getuppermultiplicity.(repr_anns))
                )
            else
                (BD.getlowermultiplicity(sp_ann), BD.getuppermultiplicity(sp_ann))
            end
            # Bounds of the master representative = subproblem bounds scaled by
            # the multiplicity range (min/max handle negative multiplicities).
            # This computation was previously duplicated in both branches.
            lb = min(getperenlb(origform, var) * mult_lb, getperenlb(origform, var) * mult_ub)
            ub = max(getperenub(origform, var) * mult_lb, getperenub(origform, var) * mult_ub)
            # 0 * ±Inf produces NaN; fall back to unbounded.
            isnan(lb) && (lb = -Inf)
            isnan(ub) && (ub = Inf)
            clonevar!(
                origform,
                masterform,
                spform,
                var,
                MasterRepPricingVar,
                is_explicit=false,
                lb=lb,
                ub=ub
            )
        end
    end
    return
end
# Clone into a DW pricing subproblem the original constraints annotated
# with `sp_ann`; subproblem rows carry no artificial variables.
function instantiate_orig_constrs!(
    spform::Formulation{DwSp},
    origform::Formulation{Original},
    ::Env,
    annotations::Annotations,
    sp_ann
)
    constrs = get(annotations.constrs_per_ann, sp_ann, nothing)
    isnothing(constrs) && return
    for (_, constr) in constrs
        cloneconstr!(origform, spform, spform, constr, DwSpPureConstr; loc_art_var_abs_cost=0.0)
    end
    return
end
# Add the pricing setup variable (fixed to 1) to a DW subproblem; its
# representative in the master appears in the convexity constraints.
function create_side_vars_constrs!(
    spform::Formulation{DwSp},
    ::Formulation{Original},
    ::Env,
    ::Annotations
)
    name = "PricingSetupVar_sp_$(getuid(spform))"
    setvar!(
        spform, name, DwSpSetupVar; cost=0.0, lb=1.0, ub=1.0, kind=Integ,
        is_explicit=true
    )
    return
end
# Determine the duty of a master variable in a Benders decomposition: a
# variable appearing in at least one constraint annotated to a separation
# subproblem is a first-stage variable, otherwise a pure master variable.
# Returns `(duty, is_explicit)`.
function _dutyexpofbendmastvar(
    var::Variable, annotations::Annotations, origform::Formulation{Original}
)
    orig_coef = getcoefmatrix(origform)
    for (constrid, _) in @view orig_coef[:, getid(var)]
        constr_ann = annotations.ann_per_constr[constrid]
        #if coef != 0 && BD.getformulation(constr_ann) == BD.Benders # TODO use haskey instead testing != 0
        if BD.getformulation(constr_ann) == BD.BendersSepSp
            return MasterBendFirstStageVar, true
        end
    end
    return MasterPureVar, true
end
# Master of Benders decomposition
# Clone into the Benders master the original variables annotated with the
# master annotation.
function instantiate_orig_vars!(
    masterform::Formulation{BendersMaster},
    origform::Formulation{Original},
    annotations::Annotations,
    mast_ann
)
    vars = get(annotations.vars_per_ann, mast_ann, nothing)
    isnothing(vars) && return
    for (_, var) in vars
        clonevar!(origform, masterform, masterform, var, MasterPureVar, is_explicit=true)
    end
    return
end
# Clone into the Benders master the original constraints annotated with the
# master annotation.
function instantiate_orig_constrs!(
    masterform::Formulation{BendersMaster},
    origform::Formulation{Original},
    ::Env,
    annotations::Annotations,
    mast_ann
)
    constrs = get(annotations.constrs_per_ann, mast_ann, nothing)
    isnothing(constrs) && return
    for (_, constr) in constrs
        cloneconstr!(
            origform, masterform, masterform, constr, MasterPureConstr, is_explicit=true
        )
    end
    return
end
# Add to the Benders master one unbounded continuous variable η[sp] per
# separation subproblem, representing that subproblem's second-stage cost;
# its id is stored in the subproblem's duty data.
function create_side_vars_constrs!(
    masterform::Formulation{BendersMaster},
    ::Formulation{Original},
    ::Env,
    ::Annotations
)
    for (spid, spform) in get_benders_sep_sps(masterform.parent_formulation)
        name = "η[$(spid)]"
        var = setvar!(
            masterform, name, MasterBendSecondStageCostVar;
            cost=1.0,
            lb=-Inf,
            ub=Inf,
            kind=Continuous,
            is_explicit=true
        )
        spform.duty_data.second_stage_cost_var = getid(var)
    end
    return
end

# The Benders master needs no artificial variables.
create_artificial_vars!(::Formulation{BendersMaster}, ::Env) = return
# Clone into a Benders separation subproblem the original variables
# annotated with `sp_ann`; they keep their original cost.
function instantiate_orig_vars!(
    spform::Formulation{BendersSp},
    origform::Formulation{Original},
    annotations::Annotations,
    sp_ann
)
    vars = get(annotations.vars_per_ann, sp_ann, nothing)
    isnothing(vars) && return
    for (_, var) in vars
        clonevar!(origform, spform, spform, var, BendSpSepVar, cost=getperencost(origform, var))
    end
    return
end
# Determine the duty of a separation-subproblem constraint: a row involving
# at least one master-annotated variable is technological (links the two
# stages), otherwise it is a pure subproblem constraint.
# Returns `(duty, is_explicit)`.
function _dutyexpofbendspconstr(constr, annotations::Annotations, origform)
    orig_coef = getcoefmatrix(origform)
    for (varid, _) in orig_coef[getid(constr), :]
        var_ann = annotations.ann_per_var[varid]
        if BD.getformulation(var_ann) == BD.Master
            return BendSpTechnologicalConstr, true
        end
    end
    return BendSpPureConstr, true
end
# Clone into a Benders separation subproblem the original constraints
# annotated with `sp_ann`, choosing the duty (pure vs technological) per row.
function instantiate_orig_constrs!(
    spform::Formulation{BendersSp},
    origform::Formulation{Original},
    ::Env,
    annotations::Annotations,
    sp_ann
)
    constrs = get(annotations.constrs_per_ann, sp_ann, nothing)
    isnothing(constrs) && return
    for (_, constr) in constrs
        duty, is_explicit = _dutyexpofbendspconstr(constr, annotations, origform)
        cloneconstr!(origform, spform, spform, constr, duty, is_explicit=is_explicit, loc_art_var_abs_cost=1.0)
    end
    return
end
# Create, inside each Benders separation subproblem, an implicit
# representative for every first-stage master variable and copy its
# coefficients into the subproblem rows it appears in.
function create_side_vars_constrs!(
    spform::Formulation{BendersSp},
    origform::Formulation{Original},
    ::Env,
    annotations::Annotations
)
    spcoef = getcoefmatrix(spform)
    origcoef = getcoefmatrix(origform)
    # 1st level representative variables.
    masterform = getmaster(spform)
    mast_ann = get(annotations, masterform)
    if haskey(annotations.vars_per_ann, mast_ann)
        vars = annotations.vars_per_ann[mast_ann]
        for (varid, var) in vars
            duty, _ = _dutyexpofbendmastvar(var, annotations, origform)
            if duty == MasterBendFirstStageVar
                name = getname(origform, var)
                # Id reuses the master variable id with the representative duty
                # so master and subproblem copies stay linked.
                repr_id = VarId(
                    varid,
                    duty=BendSpFirstStageRepVar,
                    assigned_form_uid=getuid(masterform)
                )
                repr = setvar!(
                    spform, name, BendSpFirstStageRepVar;
                    cost=getcurcost(origform, var),
                    lb=getcurlb(origform, var),
                    ub=getcurub(origform, var),
                    kind=Continuous,
                    is_explicit=false,
                    id=repr_id
                )
                # Copy the original column into the rows cloned in this subproblem.
                for (constrid, coeff) in @view origcoef[:, varid]
                    spconstr = getconstr(spform, constrid)
                    if spconstr !== nothing
                        spcoef[getid(spconstr), getid(repr)] = coeff
                    end
                end
            end
        end
    end
    return
end
# Populate `destform` with the original variables and constraints assigned
# to annotation `ann`, then copy the matching matrix coefficients.
function assign_orig_vars_constrs!(
    destform::Formulation,
    origform::Formulation{Original},
    env::Env,
    annotations::Annotations,
    ann
)
    instantiate_orig_vars!(destform, origform, annotations, ann)
    instantiate_orig_constrs!(destform, origform, env, annotations, ann)
    clonecoeffs!(origform, destform)
end
# Wrap a user-provided optimizer into a zero-argument builder closure;
# the wrapper type is chosen by dispatch on the optimizer's type.
_optimizerbuilder(opt::Function) = () -> UserOptimizer(opt)
_optimizerbuilder(opt::MOI.AbstractOptimizer) = () -> MoiOptimizer(opt)
_optimizerbuilder(opt::BD.AbstractCustomOptimizer) = () -> CustomOptimizer(opt)
# Builders of the optimizers attached to an annotation; falls back to the
# problem-wide default builder when the annotation declares none.
function getoptimizerbuilders(prob::Problem, ann::BD.Annotation)
    optimizers = BD.getoptimizerbuilders(ann)
    isempty(optimizers) && return [prob.default_optimizer_builder]
    return [_optimizerbuilder(opt) for opt in optimizers]
end
# Dispatch helper: route a freshly created subproblem into the matching
# dictionary (DW pricing vs Benders separation), keyed by formulation uid.
function _push_in_sp_dict!(
    dws::Dict{FormId,Formulation{DwSp}},
    ::Dict{FormId,Formulation{BendersSp}},
    spform::Formulation{DwSp}
)
    push!(dws, getuid(spform) => spform)
end

function _push_in_sp_dict!(
    ::Dict{FormId,Formulation{DwSp}},
    benders::Dict{FormId,Formulation{BendersSp}},
    spform::Formulation{BendersSp}
)
    push!(benders, getuid(spform) => spform)
end
# Walk the root of the decomposition tree: create the (empty) master and
# one (empty) subproblem per child leaf. Returns the master and the two
# subproblem dictionaries (only one is populated per decomposition kind).
function instantiate_formulations!(
    prob::Problem, env::Env, annotations::Annotations, parent, node::BD.Root
)
    ann = BD.annotation(node)
    form_type = BD.getformulation(ann)
    dec_type = BD.getdecomposition(ann)
    origform = get_original_formulation(prob)
    master = instantiate_master!(env, origform, form_type, dec_type)
    store!(annotations, master, ann)
    dw_pricing_sps = Dict{FormId,Formulation{DwSp}}()
    benders_sep_sps = Dict{FormId,Formulation{BendersSp}}()
    for (_, child) in BD.subproblems(node)
        sp = instantiate_formulations!(prob, env, annotations, master, child)
        _push_in_sp_dict!(dw_pricing_sps, benders_sep_sps, sp)
    end
    return master, dw_pricing_sps, benders_sep_sps
end

# Leaf of the decomposition tree: create an empty subproblem attached to
# the master and record its annotation.
function instantiate_formulations!(
    prob::Problem, env::Env, annotations::Annotations, parent::Formulation{MasterDuty}, node::BD.Leaf
) where {MasterDuty}
    ann = BD.annotation(node)
    form_type = BD.getformulation(ann)
    dec_type = BD.getdecomposition(ann)
    spform = instantiate_sp!(env, parent, form_type, dec_type)
    store!(annotations, spform, ann)
    return spform
end

# Populate the reformulation: subproblems are built *before* the master
# because the master side elements (convexity constraints, η variables)
# refer to subproblem contents.
function build_formulations!(
    reform::Reformulation, prob::Problem, env::Env, annotations::Annotations, parent,
    node::BD.Root
)
    ann = BD.annotation(node)
    master = getmaster(reform)
    for (_, dw_sp) in get_dw_pricing_sps(reform)
        build_formulations!(dw_sp, reform, prob, env, annotations, master)
    end
    for (_, bend_sp) in get_benders_sep_sps(reform)
        build_formulations!(bend_sp, reform, prob, env, annotations, master)
    end
    origform = get_original_formulation(prob)
    assign_orig_vars_constrs!(master, origform, env, annotations, ann)
    create_side_vars_constrs!(master, origform, env, annotations)
    create_artificial_vars!(master, env)
    # Leave the fast "fill mode" of the coefficient matrix once it is complete.
    closefillmode!(getcoefmatrix(master))
    push_optimizer!.(Ref(master), getoptimizerbuilders(prob, ann))
    push_optimizer!.(Ref(origform), getoptimizerbuilders(prob, ann))
end

# parent is master
# Populate one subproblem with its variables, constraints, and side elements.
function build_formulations!(
    spform, reform::Reformulation, prob::Problem, env::Env, annotations::Annotations, parent::Formulation{MasterDuty}
) where {MasterDuty}
    ann = annotations.ann_per_form[getuid(spform)]
    origform = get_original_formulation(prob)
    assign_orig_vars_constrs!(spform, origform, env, annotations, ann)
    create_side_vars_constrs!(spform, origform, env, annotations)
    closefillmode!(getcoefmatrix(spform))
    push_optimizer!.(Ref(spform), getoptimizerbuilders(prob, ann))
end
# Error messages for `check_annotations`.
# TODO: specific error type for these two errors.
# Error raised by `check_annotations` when a variable or constraint of the
# original formulation has no annotation (typically anonymous JuMP objects).
_err_check_annotations(id::VarId) = error("""
A variable (id = $id) is not annotated.
Make sure you do not use anonymous variables (variable with no name declared in JuMP macro variable).
Otherwise, open an issue at https://github.com/atoptima/Coluna.jl/issues
""")

_err_check_annotations(id::ConstrId) = error("""
A constraint (id = $id) is not annotated.
Make sure you do not use anonymous constraints (constraint with no name declared in JuMP macro variable).
Otherwise, open an issue at https://github.com/atoptima/Coluna.jl/issues
""")
"""
Make sure that all variables and constraints of the original formulation are
annotated. Otherwise, it returns an error.
"""
function check_annotations(prob::Problem, annotations::Annotations)
origform = get_original_formulation(prob)
for (varid, _) in getvars(origform)
if !haskey(annotations.ann_per_var, varid) && !haskey(annotations.ann_per_repr_var, varid)
return _err_check_annotations(varid)
end
end
for (constrid, _) in getconstrs(origform)
if !haskey(annotations.ann_per_constr, constrid)
return _err_check_annotations(constrid)
end
end
return true
end
# NOTE(review): empty stub — does nothing and appears unused in this file;
# kept for API compatibility. TODO: implement or remove.
function build_reformulation(prob::Problem, annotations::Annotations, env::Env)
end
"""
Reformulate the original formulation of prob according to the annotations.
The environment maintains formulation ids.
"""
function reformulate!(prob::Problem, annotations::Annotations, env::Env)
# Once the original formulation built, we close the "fill mode" of the
# coefficient matrix which is a super fast writing mode compared to the default
# writing mode of the dynamic sparse matrix.
origform = get_original_formulation(prob)
if getcoefmatrix(origform).matrix.fillmode
closefillmode!(getcoefmatrix(origform))
end
decomposition_tree = annotations.tree
if !isnothing(decomposition_tree)
check_annotations(prob, annotations)
root = BD.getroot(decomposition_tree)
master, dw_pricing_subprs, benders_sep_subprs = instantiate_formulations!(prob, env, annotations, origform, root)
reform = Reformulation(env, origform, master, dw_pricing_subprs, benders_sep_subprs)
master.parent_formulation = reform
set_reformulation!(prob, reform)
build_formulations!(reform, prob, env, annotations, origform, root)
relax_integrality!(getmaster(reform))
else # No decomposition provided by BlockDecomposition
push_optimizer!(
prob.original_formulation,
prob.default_optimizer_builder
)
end
return
end
| Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 884 | mutable struct Env{Id}
    env_starting_time::DateTime                   # when the environment was created
    optim_starting_time::Union{Nothing, DateTime} # when optimization started (nothing before)
    global_time_limit::Int64 # -1 means no time limit
    params::Params                                # user-provided parameters
    kpis::Kpis                                    # indicators collected during the solve
    form_counter::Int # 0 is for original form
    var_counter::Int                              # id counter for variables
    constr_counter::Int                           # id counter for constraints
    varids::MOI.Utilities.CleverDicts.CleverDict{MOI.VariableIndex, Id} # MOI index -> Coluna id
    custom_families_id::Dict{DataType, Int}       # custom var/constr family -> id
end
# Fresh environment: optimization not started, no time limit (-1), and all
# counters at zero.
function Env{Id}(params::Params) where {Id}
    return Env{Id}(
        now(), nothing, -1, params, Kpis(nothing, nothing), 0, 0, 0,
        MOI.Utilities.CleverDicts.CleverDict{MOI.VariableIndex, Id}(),
        Dict{DataType, Int}()
    )
end
# Record the instant the optimization starts; used by the time-limit check.
set_optim_start_time!(env::Env) = env.optim_starting_time = now()
# Seconds elapsed since the optimization started.
elapsed_optim_time(env::Env) = Dates.toms(now() - env.optim_starting_time) / Dates.toms(Second(1))
# A non-positive `global_time_limit` disables the limit entirely.
function time_limit_reached(env::Env)
    env.global_time_limit > 0 || return false
    return elapsed_optim_time(env) > env.global_time_limit
end
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 1911 | ############################################################################################
# Algorithm API
############################################################################################
module AlgoAPI

include("MustImplement/MustImplement.jl")
using .MustImplement

include("ColunaBase/ColunaBase.jl")
using .ColunaBase

"""
Supertype for algorithms parameters.
Data structures that inherit from this type are intended for the users.
The convention is to define the data structure together with a constructor that contains
only kw args.
For instance:

    struct MyAlgorithmParams <: AbstractAlgorithmParams
        param1::Int
        param2::Int
        MyAlgorithmParams(; param1::Int = 1, param2::Int = 2) = new(param1, param2)
    end
"""
abstract type AbstractAlgorithm end

"""
    run!(algo::AbstractAlgorithm, env, model, input)

Default method to call an algorithm.
"""
@mustimplement "Algorithm" run!(algo::AbstractAlgorithm, env, model, input) = nothing

@mustimplement "Algorithm" ismanager(algo::AbstractAlgorithm) = false

"""
Returns `true` if the algorithm will perform changes on the formulation that must be
reverted at the end of the execution of the algorithm; `false` otherwise.
"""
@mustimplement "Algorithm" change_model_state(algo::AbstractAlgorithm) = false

############################################################################################
# Divide Algorithm API
############################################################################################

"""
This algorithm type is used by the tree search algorithm to generate nodes.
"""
abstract type AbstractDivideAlgorithm <: AbstractAlgorithm end

# divide algorithms are always manager algorithms (they manage storing and restoring units)
ismanager(algo::AbstractDivideAlgorithm) = true

#####
# Default tolerances
###
# Absolute and relative optimality tolerances used by default.
default_opt_atol() = 1e-6
default_opt_rtol() = 1e-5

end
| Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 114 | mutable struct Kpis
# Key performance indicators collected during a solve; fields stay `nothing`
# until the corresponding value has been recorded.
# node_count: number of explored nodes — presumably set by the tree search; TODO confirm.
node_count::Union{Nothing, Int}
# Total optimization wall time in seconds (set at the end of `optimize!`).
elapsed_optimization_time::Union{Nothing, Float64}
end | Coluna | https://github.com/atoptima/Coluna.jl.git
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 5164 | function _welcome_message()
# Print a short banner with the current Coluna version and repository URL.
welcome = """
Coluna
Version $(version()) | https://github.com/atoptima/Coluna.jl
"""
print(welcome)
end
"""
    _adjust_params(params, init_pb)

Fill in the artificial-variable costs that were left at `nothing` in `params`.

When the user supplied a finite, non-zero initial primal bound `init_pb`, the
costs are derived from its order of magnitude: one order above it for the
global artificial variables, the same order for the local ones. Otherwise,
fixed defaults (`100000.0` and `10000.0`) are used.

Fixes two defects of the previous version:
- a negative bound raised a `DomainError` from `log`; we now take `abs(init_pb)`;
- `NaN` slipped past the `!= Inf` checks and poisoned the costs; `isfinite`
  now routes it to the defaults.
"""
function _adjust_params(params, init_pb)
    # Order of magnitude of the initial bound, or `nothing` when it is unusable.
    magnitude = if isfinite(init_pb) && !iszero(init_pb)
        ceil(log(10, abs(init_pb)))
    else
        nothing
    end
    if params.global_art_var_cost === nothing
        params.global_art_var_cost =
            magnitude === nothing ? 100000.0 : 10^(magnitude + 1)
    end
    if params.local_art_var_cost === nothing
        params.local_art_var_cost =
            magnitude === nothing ? 10000.0 : 10^magnitude
    end
    return
end
"""
Starting point of the solver.
"""
function optimize!(env::Env, prob::MathProg.Problem, annotations::Annotations)
_welcome_message()
buffer_reset = prob.original_formulation.buffer
ann_pf_reset = annotations.ann_per_form
# Adjust parameters
## Retrieve initial bounds on the objective given by the user
init_pb = get_initial_primal_bound(prob)
init_db = get_initial_dual_bound(prob)
init_cols = prob.initial_columns_callback
_adjust_params(env.params, init_pb)
# Apply decomposition
reformulate!(prob, annotations, env)
# Coluna ready to start
set_optim_start_time!(env)
@logmsg LogLevel(-1) "Coluna ready to start."
@logmsg LogLevel(-1) env.params
TO.@timeit _to "Coluna" begin
outstate, algstate = optimize!(get_optimization_target(prob), env, init_pb, init_db, init_cols)
end
env.kpis.elapsed_optimization_time = elapsed_optim_time(env)
prob.original_formulation.buffer = buffer_reset
annotations.ann_per_form = ann_pf_reset
println(_to)
TO.reset_timer!(_to)
@logmsg LogLevel(0) "Terminated"
@logmsg LogLevel(0) string("Primal bound: ", get_ip_primal_bound(outstate))
@logmsg LogLevel(0) string("Dual bound: ", get_ip_dual_bound(outstate))
return outstate, algstate
end
# Optimize a reformulation: run the solver configured in `env.params.solver`
# and return (i) an `OptimizationState` with solutions projected onto the
# original (compact) space and (ii) the raw state on the master formulation.
function optimize!(
reform::MathProg.Reformulation, env::Env, initial_primal_bound, initial_dual_bound,
initial_columns
)
master = getmaster(reform)
# Seed the optimization state with the bounds provided by the user.
initstate = OptimizationState(
master,
ip_primal_bound = initial_primal_bound,
ip_dual_bound = initial_dual_bound,
lp_dual_bound = initial_dual_bound
)
algorithm = env.params.solver
# retrieve initial columns
MathProg.initialize_solution_pools!(reform, initial_columns)
# initialize all the units used by the algorithm and its child algorithms
Algorithm.initialize_storage_units!(reform, algorithm)
# print(IOContext(stdout, :user_only => true), reform)
algstate = Algorithm.run!(algorithm, env, reform, initstate)
# we copy optimisation state as we want to project the solution to the compact space
outstate = OptimizationState(
master,
termination_status = getterminationstatus(algstate),
ip_primal_bound = get_ip_primal_bound(algstate),
ip_dual_bound = get_ip_dual_bound(algstate),
lp_primal_bound = get_lp_primal_bound(algstate),
lp_dual_bound = get_lp_dual_bound(algstate)
)
# Project every IP primal solution onto the compact (original) space.
ip_primal_sols = get_ip_primal_sols(algstate)
if !isnothing(ip_primal_sols)
for sol in ip_primal_sols
add_ip_primal_sol!(outstate, proj_cols_on_rep(sol))
end
end
# lp_primal_sol may also be of interest, for example when solving the relaxation
lp_primal_sol = get_best_lp_primal_sol(algstate)
if !isnothing(lp_primal_sol)
add_lp_primal_sol!(outstate, proj_cols_on_rep(lp_primal_sol))
end
# lp_dual_sol to retrieve, for instance, the dual value of generated cuts
lp_dual_sol = get_best_lp_dual_sol(algstate)
if !isnothing(lp_dual_sol)
add_lp_dual_sol!(outstate, lp_dual_sol)
end
# It returns two optimisation states.
# The first one contains the solutions projected on the original formulation.
# The second one contains the solutions to the master formulation so the user can
# retrieve the disaggregated solution.
return outstate, algstate
end
# Optimize a single (non-reformulated) formulation with the solver configured
# in the environment parameters. Returns the algorithm output paired with
# `nothing` (no second, master-space state in this case).
function optimize!(
    form::MathProg.Formulation, env::Env, initial_primal_bound, initial_dual_bound, _
)
    input_state = OptimizationState(
        form;
        ip_primal_bound = initial_primal_bound,
        ip_dual_bound = initial_dual_bound,
        lp_dual_bound = initial_dual_bound,
    )
    result = Algorithm.run!(env.params.solver, env, form, input_state)
    return result, nothing
end
"""
Fallback if no solver provided by the user.
"""
function optimize!(::MathProg.Reformulation, ::Nothing, ::Real, ::Real, _)
error("""
No solver to optimize the reformulation. You should provide a solver through Coluna parameters.
Please, check the starting guide of Coluna.
""")
end
function optimize!(::MathProg.Formulation, ::Nothing, ::Real, ::Real, _)
error("""
No solver to optimize the formulation. You should provide a solver through Coluna parameters.
Please, check the starting guide of Coluna.
""")
end | Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 589 | """
    Coluna.Params(
        solver = Coluna.Algorithm.TreeSearchAlgorithm(),
        global_art_var_cost = 10e6,
        local_art_var_cost = 10e4
    )
Parameters of Coluna :
- `solver` is the algorithm used to optimize the reformulation.
- `global_art_var_cost` is the cost of the global artificial variables in the master
- `local_art_var_cost` is the cost of the local artificial variables in the master

Note: every field actually defaults to `nothing`; artificial-variable costs
left at `nothing` are derived from the initial primal bound at the start of
the solve (see `_adjust_params`).
"""
@with_kw mutable struct Params
global_art_var_cost::Union{Float64, Nothing} = nothing
local_art_var_cost::Union{Float64, Nothing} = nothing
solver = nothing
end
| Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 5233 | module Algorithm
using DataStructures
import MathOptInterface
import TimerOutputs
using ..Coluna, ..ColunaBase, ..MathProg, ..MustImplement, ..ColGen, ..Benders, ..AlgoAPI, ..TreeSearch, ..Branching
using Crayons, DynamicSparseArrays, Logging, Parameters, Printf, Random, Statistics, SparseArrays, LinearAlgebra
const TO = TimerOutputs
const DS = DataStructures
const MOI = MathOptInterface
const ClB = ColunaBase
import Base: push!
############################################################################################
# Incumbent primal bound handler
############################################################################################
"""
Abstract type for an utilarity structure that handles the incumbent primal bound.
"""
abstract type AbstractGlobalPrimalBoundHandler end
@mustimplement "GlobalPrimalBoundHandler" get_global_primal_bound(m::AbstractGlobalPrimalBoundHandler) = nothing
@mustimplement "GlobalPrimalBoundHandler" get_global_primal_sol(m::AbstractGlobalPrimalBoundHandler) = nothing
@mustimplement "GlobalPrimalBoundHandler" store_ip_primal_sol!(m::AbstractGlobalPrimalBoundHandler, sol) = nothing
# Utilities to build algorithms
include("utilities/optimizationstate.jl")
include("utilities/helpers.jl")
############################################################################################
# Primal bound manager
############################################################################################
"""
Default implementation of a manager of the incumbent primal bound.
This implementation does not support paralellization.
"""
struct GlobalPrimalBoundHandler <: AbstractGlobalPrimalBoundHandler
# It only stores the IP primal solutions.
optstate::OptimizationState
end
# Build a handler around a fresh `OptimizationState` on the master formulation.
# When `ip_primal_bound` is given, it seeds the incumbent primal bound.
function GlobalPrimalBoundHandler(
    reform::Reformulation;
    ip_primal_bound = nothing
)
    state = OptimizationState(getmaster(reform))
    isnothing(ip_primal_bound) || set_ip_primal_bound!(state, ip_primal_bound)
    return GlobalPrimalBoundHandler(state)
end
# Interface implementation: delegate to the wrapped `OptimizationState`.
get_global_primal_bound(manager::GlobalPrimalBoundHandler) = get_ip_primal_bound(manager.optstate)
get_global_primal_sol(manager::GlobalPrimalBoundHandler) = get_best_ip_primal_sol(manager.optstate)
store_ip_primal_sol!(manager::GlobalPrimalBoundHandler, sol) = add_ip_primal_sols!(manager.optstate, sol)
############################################################################################
# API on top of storage API
include("data.jl")
# Algorithm interface
include("interface.jl")
# Storage units & records implementation
include("formstorages.jl")
# Basic algorithms
include("basic/subsolvers.jl")
include("basic/solvelpform.jl")
include("basic/solveipform.jl")
include("basic/cutcallback.jl")
# Child algorithms used by conquer algorithms
include("colgenstabilization.jl")
# Column generation
include("colgen/utils.jl")
include("colgen/stabilization.jl")
include("colgen/default.jl")
include("colgen/printer.jl")
include("colgen.jl")
# Benders algorithm
include("benders/utils.jl")
include("benders/default.jl")
include("benders/printer.jl")
include("benders.jl")
# Presolve
include("presolve/helpers.jl")
include("presolve/interface.jl")
include("presolve/propagation.jl")
# Conquer
include("conquer.jl")
# Here include divide algorithms
include("branching/interface.jl")
include("branching/sbnode.jl")
include("branching/selectioncriteria.jl")
include("branching/scores.jl")
include("branching/single_var_branching.jl")
include("branching/printer.jl")
include("branching/branchingalgo.jl")
# Heuristics
include("heuristic/restricted_master.jl")
# Tree search
include("treesearch.jl")
include("treesearch/printer.jl")
include("treesearch/branch_and_bound.jl")
include("branchcutprice.jl")
# Algorithm should export only methods usefull to define & parametrize algorithms, and
# data structures from utilities.
# Other Coluna's submodules should be independent to Algorithm
# Utilities
export getterminationstatus, setterminationstatus!,
get_ip_primal_sols, get_lp_primal_sols, get_lp_dual_sols, get_best_ip_primal_sol,
get_best_lp_primal_sol, get_best_lp_dual_sol, update_ip_primal_sol!,
update_lp_primal_sol!, update_lp_dual_sol!, add_ip_primal_sol!, add_ip_primal_sols!,
add_lp_primal_sol!, add_lp_dual_sol!, set_ip_primal_sol!, set_lp_primal_sol!, set_lp_dual_sol!,
empty_ip_primal_sols!, empty_lp_primal_sols!, empty_lp_dual_sols!,
get_ip_primal_bound, get_lp_primal_bound, get_lp_dual_bound, get_ip_dual_bound,
set_ip_primal_bound!, set_lp_primal_bound!, set_lp_dual_bound!, set_ip_dual_bound!,
update_ip_primal_bound!, update_lp_primal_bound!, update_lp_dual_bound!, update_ip_dual_bound!,
run!
# Algorithms
export TreeSearchAlgorithm, ColCutGenConquer, ColumnGeneration, BendersConquer, BendersCutGeneration, SolveIpForm, RestrictedMasterIPHeuristic,
SolveLpForm, NoBranching, Branching, StrongBranching, PresolveAlgorithm, PresolveOutput,
FirstFoundCriterion, MostFractionalCriterion, LeastFractionalCriterion, ClosestToNonZeroIntegerCriterion, SingleVarBranchingRule
# Algorithm's types
export AbstractOptimizationAlgorithm,
OptimizationState
# Types of optimizers
export MoiOptimize, UserOptimizer
end
| Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 6437 | """
Coluna.Algorithm.BendersCutGeneration(
restr_master_solve_alg = SolveLpForm(get_dual_sol = true, relax_integrality = true),
restr_master_optimizer_id = 1,
separation_solve_alg = SolveLpForm(get_dual_sol = true, relax_integrality = true)
max_nb_iterations::Int = 100,
)
Benders cut generation algorithm that can be applied to a formulation reformulated using
Benders decomposition.
This algorithm is an implementation of the generic algorithm provided by the `Benders`
submodule.
**Parameters:**
- `restr_master_solve_alg`: algorithm to solve the restricted master problem
- `restr_master_optimizer_id`: optimizer id to use to solve the restricted master problem
- `separation_solve_alg`: algorithm to solve the separation problem (must be a LP solver that returns a dual solution)
**Option:**
- `max_nb_iterations`: maximum number of iterations
## About the output
At each iteration, the Benders cut generation algorithm show following statistics:
<it= 6> <et= 0.05> <mst= 0.00> <sp= 0.00> <cuts= 0> <master= 293.5000>
where:
- `it` stands for the current number of iterations of the algorithm
- `et` is the elapsed time in seconds since Coluna has started the optimisation
- `mst` is the time in seconds spent solving the master problem at the current iteration
- `sp` is the time in seconds spent solving the separation problem at the current iteration
- `cuts` is the number of cuts generated at the current iteration
- `master` is the objective value of the master problem at the current iteration
**Debug options** (print at each iteration):
- `debug_print_master`: print the master problem
- `debug_print_master_primal_solution`: print the master problem with the primal solution
- `debug_print_master_dual_solution`: print the master problem with the dual solution (make sure the `restr_master_solve_alg` returns a dual solution)
- `debug_print_subproblem`: print the subproblem
- `debug_print_subproblem_primal_solution`: print the subproblem with the primal solution
- `debug_print_subproblem_dual_solution`: print the subproblem with the dual solution
- `debug_print_generated_cuts`: print the generated cuts
"""
struct BendersCutGeneration <: AbstractOptimizationAlgorithm
# Algorithms used for the restricted master and the separation problems.
restr_master_solve_alg::Union{SolveLpForm, SolveIpForm}
restr_master_optimizer_id::Int
# Numerical tolerances used by the Benders loop
# (not listed in the docstring above — TODO document there).
feasibility_tol::Float64
optimality_tol::Float64
max_nb_iterations::Int
separation_solve_alg::SolveLpForm
# Iteration-statistics printing and per-iteration debug-printing toggles.
print::Bool
debug_print_master::Bool
debug_print_master_primal_solution::Bool
debug_print_master_dual_solution::Bool
debug_print_subproblem::Bool
debug_print_subproblem_primal_solution::Bool
debug_print_subproblem_dual_solution::Bool
debug_print_generated_cuts::Bool
# Keyword constructor providing the defaults advertised in the docstring.
BendersCutGeneration(;
restr_master_solve_alg = SolveLpForm(get_dual_sol = true, relax_integrality = true),
restr_master_optimizer_id = 1,
feasibility_tol = 1e-5,
optimality_tol = Coluna.DEF_OPTIMALITY_ATOL,
max_nb_iterations = 100,
separation_solve_alg = SolveLpForm(get_dual_sol = true, relax_integrality = true),
print = true,
debug_print_master = false,
debug_print_master_primal_solution = false,
debug_print_master_dual_solution = false,
debug_print_subproblem = false,
debug_print_subproblem_primal_solution = false,
debug_print_subproblem_dual_solution = false,
debug_print_generated_cuts = false
) = new(
restr_master_solve_alg,
restr_master_optimizer_id,
feasibility_tol,
optimality_tol,
max_nb_iterations,
separation_solve_alg,
print,
debug_print_master,
debug_print_master_primal_solution,
debug_print_master_dual_solution,
debug_print_subproblem,
debug_print_subproblem_primal_solution,
debug_print_subproblem_dual_solution,
debug_print_generated_cuts
)
end
# TO DO : BendersCutGeneration does not have yet the child algorithms
# it should have at least the algorithm to solve the master LP and the algorithms
# to solve the subproblems
# Declare the storage units (and access modes) the Benders cut generation
# algorithm touches on the master and on each separation subproblem.
# TO DO: most of these usages should eventually be communicated by child algorithms.
function get_units_usage(algo::BendersCutGeneration, reform::Reformulation)
    master = getmaster(reform)
    usages = Tuple{AbstractModel, UnitType, UnitPermission}[
        (master, MasterCutsUnit, READ_AND_WRITE),
        (master, StaticVarConstrUnit, READ_ONLY),
        (master, PartialSolutionUnit, READ_ONLY),
        (master, MasterBranchConstrsUnit, READ_ONLY),
        (master, MasterColumnsUnit, READ_ONLY),
    ]
    for (_, sep_sp) in get_benders_sep_sps(reform)
        push!(usages, (sep_sp, StaticVarConstrUnit, READ_ONLY))
    end
    return usages
end
# Instantiate a Benders context of type `C` from the reformulation and the algorithm.
_new_context(C::Type{<:Benders.AbstractBendersContext}, reform, algo) = C(reform, algo)
# TODO: fix this method
# Translate the result of the Benders loop into an `OptimizationState` on the master.
function _benders_optstate_output(result, master)
optstate = OptimizationState(master)
if result.infeasible
setterminationstatus!(optstate, INFEASIBLE)
end
if !isnothing(result.ip_primal_sol)
# NOTE(review): the IP primal solution is stored as an *LP* primal solution
# here — confirm this is intentional.
set_lp_primal_sol!(optstate, result.ip_primal_sol)
end
if !isnothing(result.mlp)
# `mlp` (master value) serves as LP/IP dual bound and LP primal bound at once.
set_lp_dual_bound!(optstate, DualBound(master, result.mlp))
set_ip_dual_bound!(optstate, DualBound(master, result.mlp))
set_lp_primal_bound!(optstate, PrimalBound(master, result.mlp))
end
return optstate
end
"""
    run!(algo::BendersCutGeneration, env, reform, input)

Run the Benders cut generation loop on `reform` and translate the Benders
result into an `OptimizationState` on the master.

Bug fix: the printer context previously received a hard-coded `print = true`,
silently ignoring the user's `algo.print` setting; it now forwards `algo.print`.
"""
function run!(
    algo::BendersCutGeneration, env::Env, reform::Reformulation, input::OptimizationState
)
    ctx = Coluna.Algorithm.BendersPrinterContext(
        reform, algo;
        print = algo.print,
        debug_print_master = algo.debug_print_master,
        debug_print_master_primal_solution = algo.debug_print_master_primal_solution,
        debug_print_master_dual_solution = algo.debug_print_master_dual_solution,
        debug_print_subproblem = algo.debug_print_subproblem,
        debug_print_subproblem_primal_solution = algo.debug_print_subproblem_primal_solution,
        debug_print_subproblem_dual_solution = algo.debug_print_subproblem_dual_solution,
        debug_print_generated_cuts = algo.debug_print_generated_cuts
    )
    result = Benders.run_benders_loop!(ctx, env)
    return _benders_optstate_output(result, getmaster(reform))
end
| Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 8901 | """
Coluna.Algorithm.BranchCutAndPriceAlgorithm(;
run_presolve::Bool = false,
maxnumnodes::Int = 100000,
opt_atol::Float64 = Coluna.DEF_OPTIMALITY_ATOL,
opt_rtol::Float64 = Coluna.DEF_OPTIMALITY_RTOL,
restmastipheur_timelimit::Int = 600,
restmastipheur_frequency::Int = 1,
restmastipheur_maxdepth::Int = 1000,
max_nb_cut_rounds::Int = 3,
colgen_stabilization::Float64 = 0.0,
colgen_cleanup_threshold::Int = 10000,
colgen_stages_pricing_solvers::Vector{Int} = [1],
colgen_strict_integrality_check::Bool,
stbranch_phases_num_candidates::Vector{Int} = Int[],
stbranch_intrmphase_stages::Vector{NamedTuple{(:userstage, :solverid, :maxiters), Tuple{Int64, Int64, Int64}}}
)
Alias for a simplified parameterisation
of the branch-cut-and-price algorithm.
Parameters :
- `run_presolve` : whether to call the presolve algorithm before running the branch-cut-and-price
- `maxnumnodes` : maximum number of nodes explored by the algorithm
- `opt_atol` : optimality absolute tolerance
- `opt_rtol` : optimality relative tolerance
- `restmastipheur_timelimit` : time limit in seconds for the restricted master heuristic
(if <= 0 then the heuristic is disabled)
- `restmastipheur_frequency` : frequency of calls to the restricted master heuristic
- `restmastipheur_maxdepth` : maximum depth of the search tree when the restricted master heuristic is called
- `max_nb_cut_rounds` : maximum number of cut generation rounds in every node of the search tree
- `colgen_stabilization` : parameterisation of the dual price smoothing stabilization of column generation
0.0 - disabled, 1.0 - automatic, ∈(0.0,1.0) - fixed smoothing parameter
- `colgen_cleanup_threshold` : threshold (number of active columns) to trigger the restricted master LP clean up
- `colgen_stages_pricing_solvers` : vector of pricing solver ids for every column generation stage,
pricing solvers should be specified using argument `solver` of `BlockDecomposition.specify!()`,
the number of column generation stages is equal to the length of this vector,
column generation stages are executed in the reverse order,
the first stage should be exact to ensure the optimality of the BCP algorithm
- `colgen_strict_integrality_check` : see description in `Coluna.Algorithm.ColumnGeneration`
- `stbranch_phases_num_candidates` : maximum number of candidates for each strong branching phase,
strong branching is activated if this vector is not empty,
the number of phases in strong branching is equal to min{3, length(stbranch_phases_num_candidates)},
in the last phase, the standard column-and-cut generation procedure is run,
in the first phase (if their number is >= 2), only the restricted master LP is resolved,
in the second (intermediate) phase (if their number is >= 3), usually a heuristic pricing is used
or the number of column generation iterations is limited, this is parameterised with the three
next parameters, cut separation is not called in the intermediate strong branching phase,
if the lenth of this vector > 3, then all values except first, second, and last ones are ignored
- `stbranch_intrmphase_stages` : the size of this vector is the number of column generation stages in the intemediate phase of strong branching
each element of the vector is the named triple (userstage, solver, maxiters). "userstage" is the
value of "stage" parameter passed to the pricing callback on this stage, "solverid" is the solver id on this stage,
and "maxiters" is the maximum number of column generation iterations on this stage
"""
# Assemble a `TreeSearchAlgorithm` (branch-cut-and-price) from the simplified
# parameters documented in the docstring above.
function BranchCutAndPriceAlgorithm(;
run_presolve::Bool = false,
maxnumnodes::Int = 100000,
branchingtreefile::String = "",
jsonfile::String = "",
opt_atol::Float64 = Coluna.DEF_OPTIMALITY_ATOL,
opt_rtol::Float64 = Coluna.DEF_OPTIMALITY_RTOL,
restmastipheur_timelimit::Int = 600,
restmastipheur_frequency::Int = 1,
restmastipheur_maxdepth::Int = 1000,
max_nb_cut_rounds::Int = 3,
colgen_stabilization::Float64 = 0.0,
colgen_cleanup_threshold::Int = 10000,
colgen_stages_pricing_solvers::Vector{Int64} = [1],
colgen_strict_integrality_check::Bool = false,
stbranch_phases_num_candidates::Vector{Int64} = Int[],
stbranch_intrmphase_stages::Vector{NamedTuple{(:userstage, :solverid, :maxiters), Tuple{Int64, Int64, Int64}}} = [(userstage=1, solverid=1, maxiters=100)]
)
# Restricted master IP heuristic (disabled when the time limit is <= 0).
heuristics = ParameterizedHeuristic[]
if restmastipheur_timelimit > 0
heuristic = ParameterizedHeuristic(
RestrictedMasterHeuristic(),
1.0, 1.0, restmastipheur_frequency,
restmastipheur_maxdepth, "Restricted Master IP"
)
push!(heuristics, heuristic)
end
# Column generation used by the conquer at every node.
colgen = ColumnGeneration(
pricing_prob_solve_alg = SolveIpForm(
user_params = UserOptimize(),
moi_params = MoiOptimize(
deactivate_artificial_vars = false,
enforce_integrality = false
)
),
strict_integrality_check = colgen_strict_integrality_check,
stages_pricing_solver_ids = colgen_stages_pricing_solvers,
smoothing_stabilization = colgen_stabilization,
cleanup_threshold = colgen_cleanup_threshold,
opt_atol = opt_atol,
opt_rtol = opt_rtol
)
conquer = ColCutGenConquer(
colgen = colgen,
max_nb_cut_rounds = max_nb_cut_rounds,
primal_heuristics = heuristics,
opt_atol = opt_atol,
opt_rtol = opt_rtol
)
# Placeholder; always overwritten below by strong or classic branching.
branching = NoBranching()
branching_rules = Branching.PrioritisedBranchingRule[Branching.PrioritisedBranchingRule(SingleVarBranchingRule(), 1.0, 1.0)]
if !isempty(stbranch_phases_num_candidates)
# Strong branching: up to three phases (restricted master LP, intermediate
# limited column generation, full conquer) — see docstring.
branching_phases = BranchingPhase[]
if length(stbranch_phases_num_candidates) >= 2
push!(branching_phases,
BranchingPhase(first(stbranch_phases_num_candidates), RestrMasterLPConquer(), ProductScore())
)
if length(stbranch_phases_num_candidates) >= 3
# Intermediate phase: column generation limited by `stbranch_intrmphase_stages`,
# no cut separation (max_nb_cut_rounds = 0).
colgen = ColumnGeneration(
pricing_prob_solve_alg = SolveIpForm(
user_params = UserOptimize(),
moi_params = MoiOptimize(
deactivate_artificial_vars = false,
enforce_integrality = false
)
),
strict_integrality_check = colgen_strict_integrality_check,
stages_pricing_solver_ids = map(t -> t.solverid, stbranch_intrmphase_stages),
smoothing_stabilization = colgen_stabilization,
cleanup_threshold = colgen_cleanup_threshold,
max_nb_iterations = mapreduce(t -> t.maxiters, +, stbranch_intrmphase_stages),
opt_atol = opt_atol,
opt_rtol = opt_rtol
)
intrmphase_conquer = ColCutGenConquer(
#stages = intrmphase_stages,
colgen = colgen,
max_nb_cut_rounds = 0,
primal_heuristics = [],
opt_atol = opt_atol,
opt_rtol = opt_rtol
)
push!(branching_phases,
BranchingPhase(stbranch_phases_num_candidates[2], intrmphase_conquer, ProductScore())
)
end
end
# Last phase: the standard column-and-cut generation conquer.
push!(branching_phases, BranchingPhase(last(stbranch_phases_num_candidates), conquer, TreeDepthScore()))
branching = StrongBranching(rules = branching_rules, phases = branching_phases)
else
branching = ClassicBranching(rules = branching_rules)
end
return TreeSearchAlgorithm(
presolvealg = run_presolve ? PresolveAlgorithm() : nothing,
conqueralg = conquer,
dividealg = branching,
maxnumnodes = maxnumnodes,
branchingtreefile = branchingtreefile,
jsonfile = jsonfile,
opt_atol = opt_atol;
opt_rtol = opt_rtol
)
end | Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 11190 | """
Coluna.Algorithm.ColumnGeneration(
restr_master_solve_alg = SolveLpForm(get_dual_sol = true),
pricing_prob_solve_alg = SolveIpForm(
moi_params = MoiOptimize(
deactivate_artificial_vars = false,
enforce_integrality = false
)
),
essential_cut_gen_alg = CutCallbacks(call_robust_facultative = false),
strict_integrality_check = false,
max_nb_iterations = 1000,
log_print_frequency = 1,
redcost_tol = 1e-4,
show_column_already_inserted_warning = true,
cleanup_threshold = 10000,
cleanup_ratio = 0.66,
smoothing_stabilization = 0.0 # should be in [0, 1],
)
Column generation algorithm that can be applied to formulation reformulated using
Dantzig-Wolfe decomposition.
This algorithm first solves the linear relaxation of the master (master LP) using `restr_master_solve_alg`.
Then, it solves the subproblems by calling `pricing_prob_solve_alg` to get the columns that
have the best reduced costs and that hence, may improve the master LP's objective the most.
In order for the algorithm to converge towards the optimal solution of the master LP,
it suffices that the pricing oracle returns, at each iteration, a negative reduced cost solution if one exists.
The algorithm stops when all subproblems fail to generate a column with negative
(positive) reduced cost in the case of a minimization (maximization) problem or when it
reaches the maximum number of iterations.
**Parameters:**
- `restr_master_solve_alg`: algorithm to optimize the master LP
- `pricing_prob_solve_alg`: algorithm to optimize the subproblems
- `essential_cut_gen_alg`: algorithm to generate essential cuts which is run when the solution of the master LP is integer.
**Options:**
- `max_nb_iterations`: maximum number of iterations
- `log_print_frequency`: display frequency of iterations statistics
- `strict_integrality_check`: by default (value `false`) the integrality check in column generation is performed by the mapping procedure
from "F. Vanderbeck, Branching in branch-and-price: a generic scheme, Math.Prog. (2011)";
in the case the pricing subproblems are solved by a callback, and some subproblem integer variables
are "hidden" from _Coluna_, the mapping procedure may not be valid, and the integrality should be checked
in the "strict" way (explicitly verifying that all columns are integer)
Undocumented parameters are in alpha version.
## About the ouput
At each iteration (depending on `log_print_frequency`),
the column generation algorithm can display following statistics.
<it= 90> <et=15.62> <mst= 0.02> <sp= 0.05> <cols= 4> <al= 0.00> <DB= 300.2921> <mlp= 310.3000> <PB=310.3000>
Here are their meanings :
- `it` stands for the current number of iterations of the algorithm
- `et` is the elapsed time in seconds since Coluna has started the optimisation
- `mst` is the time in seconds spent solving the master LP at the current iteration
- `sp` is the time in seconds spent solving the subproblems at the current iteration
- `cols` is the number of column generated by the subproblems at the current iteration
- `al` is the smoothing factor of the stabilisation at the current iteration (alpha version)
- `DB` is the dual bound of the master LP at the current iteration
- `mlp` is the objective value of the master LP at the current iteration
- `PB` is the objective value of the best primal solution found by Coluna at the current iteration
"""
struct ColumnGeneration <: AbstractOptimizationAlgorithm
# Child algorithms: master LP solver, pricing solver (per stage), essential cut callback.
restr_master_solve_alg::SolveLpForm
restr_master_optimizer_id::Int
pricing_prob_solve_alg::SolveIpForm
stages_pricing_solver_ids::Vector{Int}
essential_cut_gen_alg::CutCallbacks
# Options — see the docstring above for the documented ones; the remaining
# fields are in alpha version.
strict_integrality_check::Bool
max_nb_iterations::Int64
log_print_frequency::Int64
store_all_ip_primal_sols::Bool
redcost_tol::Float64
show_column_already_inserted_warning::Bool
throw_column_already_inserted_warning::Bool
solve_subproblems_parallel::Bool
cleanup_threshold::Int64
cleanup_ratio::Float64
smoothing_stabilization::Float64
opt_atol::Float64
opt_rtol::Float64
print::Bool
# Keyword constructor with the defaults advertised in the docstring.
ColumnGeneration(;
restr_master_solve_alg = SolveLpForm(get_dual_sol=true),
restr_master_optimizer_id = 1,
pricing_prob_solve_alg = SolveIpForm(
moi_params = MoiOptimize(
deactivate_artificial_vars = false,
enforce_integrality = false
)
),
stages_pricing_solver_ids = [1],
essential_cut_gen_alg = CutCallbacks(call_robust_facultative=false),
strict_integrality_check = false,
max_nb_iterations = 1000,
log_print_frequency = 1,
store_all_ip_primal_sols = false,
redcost_tol = 1e-4,
show_column_already_inserted_warning = true,
throw_column_already_inserted_warning = false,
solve_subproblems_parallel = false,
cleanup_threshold = 10000,
cleanup_ratio = 0.66,
smoothing_stabilization = 0.0, # should be in [0, 1]
opt_atol = Coluna.DEF_OPTIMALITY_ATOL,
opt_rtol = Coluna.DEF_OPTIMALITY_RTOL,
print = true
) = new(
restr_master_solve_alg,
restr_master_optimizer_id,
pricing_prob_solve_alg,
stages_pricing_solver_ids,
essential_cut_gen_alg,
strict_integrality_check,
max_nb_iterations,
log_print_frequency,
store_all_ip_primal_sols,
redcost_tol,
show_column_already_inserted_warning,
throw_column_already_inserted_warning,
solve_subproblems_parallel,
cleanup_threshold,
cleanup_ratio,
smoothing_stabilization,
opt_atol,
opt_rtol,
print
)
end
############################################################################################
# Column generation parameters checker.
# `check_parameter` returns true by default
############################################################################################
# function check_parameter(::ColumnGeneration, ::Val{:restr_master_solve_alg}, value, reform)
# end
# function check_parameter(::ColumnGeneration, ::Val{:restr_master_optimizer_id}, value, reform)
# end
# function check_parameter(::ColumnGeneration, ::Val{:pricing_prob_solve_alg}, value, reform)
# end
# function check_parameter(::ColumnGeneration, ::Val{:stages_pricing_solver_ids}, value, reform)
# end
# function check_parameter(::ColumnGeneration, ::Val{:essential_cut_gen_alg}, value, reform)
# end
# Parameter validity checks hooked into the generic `check_parameter` mechanism
# (parameters without a specific method fall back to the default, which accepts
# any value).
check_parameter(::ColumnGeneration, ::Val{:max_nb_iterations}, value, reform) = value > 0
# Fixed: the documented default `log_print_frequency = 1` (print every iteration)
# was rejected by the previous `value > 1` check; `1` must be accepted.
check_parameter(::ColumnGeneration, ::Val{:log_print_frequency}, value, reform) = value >= 1
check_parameter(::ColumnGeneration, ::Val{:redcost_tol}, value, reform) = value > 0
check_parameter(::ColumnGeneration, ::Val{:cleanup_threshold}, value, reform) = value > 0
check_parameter(::ColumnGeneration, ::Val{:cleanup_ratio}, value, reform) = 0 < value < 1
check_parameter(::ColumnGeneration, ::Val{:smoothing_stabilization}, value, reform) = 0 <= value <= 1
check_parameter(::ColumnGeneration, ::Val{:opt_atol}, value, reform) = value > 0
check_parameter(::ColumnGeneration, ::Val{:opt_rtol}, value, reform) = value > 0
# Stabilization is active whenever the smoothing factor is non-zero.
stabilization_is_used(algo::ColumnGeneration) = !iszero(algo.smoothing_stabilization)
############################################################################################
# Implementation of Algorithm interface.
############################################################################################
# Map each child algorithm name to the (algorithm, formulation) pair it runs on:
# the master LP solver, the essential cut callback, and one pricing-solver entry
# per Dantzig-Wolfe pricing subproblem.
function get_child_algorithms(algo::ColumnGeneration, reform::Reformulation)
    master = getmaster(reform)
    children = Dict{String, Tuple{AlgoAPI.AbstractAlgorithm, MathProg.Formulation}}()
    children["restr_master_solve_alg"] = (algo.restr_master_solve_alg, master)
    children["essential_cut_gen_alg"] = (algo.essential_cut_gen_alg, master)
    for (sp_id, sp_form) in get_dw_pricing_sps(reform)
        children["pricing_prob_solve_alg_sp$sp_id"] = (algo.pricing_prob_solve_alg, sp_form)
    end
    return children
end
# Declares the storage units the column generation reads or writes, so the
# records system knows what must be saved/restored around the algorithm.
function get_units_usage(algo::ColumnGeneration, reform::Reformulation)
    master = getmaster(reform)
    units_usage = Tuple{AbstractModel,UnitType,UnitPermission}[
        (master, MasterColumnsUnit, READ_AND_WRITE),
        (master, StaticVarConstrUnit, READ_ONLY),
        (master, PartialSolutionUnit, READ_ONLY),
        # as column generation may call essential cut callbacks
        # TO DO: it would be good to verify first whether any callback is really defined
        (master, MasterCutsUnit, READ_AND_WRITE),
    ]
    for (_, sp_form) in get_dw_pricing_sps(reform)
        push!(units_usage, (sp_form, StaticVarConstrUnit, READ_ONLY))
    end
    if stabilization_is_used(algo)
        #push!(units_usage, (master, ColGenStabilizationUnit, READ_AND_WRITE))
    end
    return units_usage
end
############################################################################################
# Column generation algorithm.
############################################################################################
# Chooses the column generation context type: the printing variant when verbose
# output is requested, the plain context otherwise.
_colgen_context(algo::ColumnGeneration) = algo.print ? ColGenPrinterContext : ColGenContext

# Instantiates a column generation context of type `C` for the given
# reformulation and algorithm parameters.
_new_context(C::Type{<:ColGen.AbstractColGenContext}, reform, algo) = C(reform, algo)
"""
Builds the `OptimizationState` returned by the column generation step from the
raw column generation `result` and the `master` formulation. When the result is
infeasible, only the termination status is set; otherwise the available primal
and dual solutions and bounds are copied into the state.
"""
function _colgen_optstate_output(result, master)
    optstate = OptimizationState(master)
    if ColGen.is_infeasible(result)
        # If the column generation finds the problem infeasible, we consider that all the
        # other information are irrelevant.
        setterminationstatus!(optstate, INFEASIBLE)
    else
        lp_primal_sol = ColGen.get_master_lp_primal_sol(result)
        if !isnothing(lp_primal_sol)
            set_lp_primal_sol!(optstate, lp_primal_sol)
        end
        ip_primal_sol = ColGen.get_master_ip_primal_sol(result)
        if !isnothing(ip_primal_sol)
            update_ip_primal_sol!(optstate, ip_primal_sol)
        end
        lp_dual_sol = ColGen.get_master_dual_sol(result)
        if !isnothing(lp_dual_sol)
            update_lp_dual_sol!(optstate, lp_dual_sol)
        end
        db = ColGen.get_dual_bound(result)
        # BUGFIX: test the accessor's return value (`db`) instead of reaching into
        # `result.db`, which bypassed the ColGen accessor API and would error on a
        # result type without a `db` field.
        if !isnothing(db)
            set_lp_dual_bound!(optstate, DualBound(master, db))
            set_ip_dual_bound!(optstate, DualBound(master, db))
        end
        mlp = ColGen.get_master_lp_primal_bound(result)
        if !isnothing(mlp)
            set_lp_primal_bound!(optstate, PrimalBound(master, mlp))
        end
    end
    return optstate
end
# Entry point of the column generation algorithm: builds the context, runs the
# generic ColGen loop, and converts the result into an `OptimizationState`.
function run!(algo::ColumnGeneration, env::Env, reform::Reformulation, input::AbstractConquerInput)
    context_type = _colgen_context(algo)
    context = _new_context(context_type, reform, algo)
    colgen_result = ColGen.run!(context, env, get_global_primal_handler(input))
    return _colgen_optstate_output(colgen_result, getmaster(reform))
end
| Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 2030 | # mutable struct ColGenStabilizationUnit <: AbstractRecordUnit
# basealpha::Float64 # "global" alpha parameter
# curalpha::Float64 # alpha parameter during the current misprice sequence
# nb_misprices::Int64 # number of misprices during the current misprice sequence
# pseudo_dual_bound::ColunaBase.Bound # pseudo dual bound, may be non-valid, f.e. when the pricing problem solved heuristically
# valid_dual_bound::ColunaBase.Bound # valid dual bound
# stabcenter::Union{Nothing,DualSolution} # current stability center, correspond to cur_dual_bound
# newstabcenter::Union{Nothing,DualSolution} # to keep temporarily stab. center after update
# basestabcenter::Union{Nothing,DualSolution} # stability center, corresponding to valid_dual_bound
# end
# function ClB.storage_unit(::Type{ColGenStabilizationUnit}, master::Formulation{DwMaster})
# return ColGenStabilizationUnit(
# 0.5, 0.0, 0, DualBound(master), DualBound(master), nothing, nothing, nothing
# )
# end
# mutable struct ColGenStabRecord <: AbstractRecord
# alpha::Float64
# dualbound::ColunaBase.Bound
# stabcenter::Union{Nothing,DualSolution}
# end
# struct ColGenStabKey <: AbstractStorageUnitKey end
# key_from_storage_unit_type(::Type{ColGenStabilizationUnit}) = ColGenStabKey()
# record_type_from_key(::ColGenStabKey) = ColGenStabRecord
# function ClB.record(::Type{ColGenStabRecord}, id::Int, form::Formulation, unit::ColGenStabilizationUnit)
# alpha = unit.basealpha < 0.5 ? 0.5 : unit.basealpha
# return ColGenStabRecord(alpha, unit.valid_dual_bound, unit.basestabcenter)
# end
# ClB.record_type(::Type{ColGenStabilizationUnit}) = ColGenStabRecord
# ClB.storage_unit_type(::Type{ColGenStabRecord}) = ColGenStabilizationUnit
# function ClB.restore_from_record!(
# ::Formulation, unit::ColGenStabilizationUnit, state::ColGenStabRecord
# )
# unit.basealpha = state.alpha
# unit.valid_dual_bound = state.dualbound
# unit.basestabcenter = state.stabcenter
# return
# end
| Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 16357 | ####################################################################
# ParameterizedHeuristic
####################################################################
"""
Wraps a primal heuristic `algorithm` together with its scheduling parameters:
priorities (root vs. non-root nodes), a run frequency, the maximum node depth
at which it may run, and a display name.
"""
struct ParameterizedHeuristic{A <: AbstractOptimizationAlgorithm}
    algorithm::A
    root_priority::Float64     # priority used to order heuristics at the root node
    nonroot_priority::Float64  # priority used at non-root nodes
    frequency::Integer         # run frequency -- NOTE(review): not used in this file; confirm semantics at call sites
    max_depth::Integer         # heuristic is skipped at nodes deeper than this
    name::String
end

# Default parameterization: run the restricted master IP heuristic at every node.
ParamRestrictedMasterHeuristic() =
    ParameterizedHeuristic(
        RestrictedMasterHeuristic(),
        1.0, 1.0, 1, 1000, "Restricted Master IP"
    )
####################################################################
# NodeFinalizer
####################################################################
"""
Wraps an algorithm used to finalize a node, together with the minimum node depth
at which it may run and a display name.
"""
struct NodeFinalizer <: AbstractConquerAlgorithm
    algorithm::AbstractOptimizationAlgorithm
    min_depth::Integer
    name::String
end

####################################################################
# BeforeCutGenAlgo
####################################################################

"Algorithm called before cut generation."
struct BeforeCutGenAlgo <: AbstractConquerAlgorithm
    algorithm::AbstractOptimizationAlgorithm
    name::String
end

# The wrapped algorithm runs on the whole reformulation.
get_child_algorithms(algo::BeforeCutGenAlgo, reform::Reformulation) = Dict("algorithm" => (algo.algorithm, reform))
####################################################################
# BendersConquer
####################################################################
# Benders-decomposition-based conquer: delegates to the Benders cut generation
# algorithm. Its `run!` implementation is still a stub (see below).
@with_kw struct BendersConquer <: AbstractConquerAlgorithm
    benders::BendersCutGeneration = BendersCutGeneration()
end

# BendersConquer does not use any unit for the moment, it just calls
# BendersCutSeparation algorithm, therefore get_units_usage() is not defined for it
get_child_algorithms(algo::BendersConquer, reform::Reformulation) = Dict("benders" => (algo.benders, reform))

# NOTE(review): stub -- the commented lines sketch the intended implementation;
# currently this always returns `nothing` without running anything.
function run!(algo::BendersConquer, env::Env, reform::Reformulation, input::AbstractConquerInput)
    # !run_conquer(input) && return
    # node = getnode(input)
    # node_state = <optimization state of node>
    # output = run!(algo.benders, env, reform, node_state)
    # update!(node_state, output)
    return
end
####################################################################
# ColCutGenConquer
####################################################################
"""
    Coluna.Algorithm.ColCutGenConquer(
        colgen = ColumnGeneration(),
        cutgen = CutCallbacks(),
        primal_heuristics = ParameterizedHeuristic[ParamRestrictedMasterHeuristic()],
        max_nb_cut_rounds = 3
    )

Column-and-cut-generation based algorithm to find primal and dual bounds for a
problem decomposed using Dantzig-Wolfe paradigm.

Parameters :
- `colgen`: column generation algorithm
- `cutgen`: cut generation algorithm
- `primal_heuristics`: heuristics to find a feasible solution
- `max_nb_cut_rounds` : number of cut generation done by the algorithm

Other fields (set through the keyword constructor):
- `before_cutgen_user_algorithm`: optional algorithm run before each cut generation round
- `node_finalizer`: optional algorithm run when the gap is still open after heuristics
- `preprocess`: optional preprocessing algorithm run first
- `opt_atol` / `opt_rtol`: absolute/relative tolerances used to decide the gap is closed
- `verbose`: print information about improving solutions found by heuristics
"""
struct ColCutGenConquer <: AbstractConquerAlgorithm
    colgen::ColumnGeneration
    primal_heuristics::Vector{ParameterizedHeuristic}
    before_cutgen_user_algorithm::Union{Nothing, BeforeCutGenAlgo}
    node_finalizer::Union{Nothing, NodeFinalizer}
    preprocess  # optional preprocessing algorithm (or `nothing`)
    cutgen      # cut generation algorithm (default `CutCallbacks()`)
    max_nb_cut_rounds::Int # TODO : tailing-off ?
    opt_atol::Float64 # TODO : force this value in an init() method
    opt_rtol::Float64 # TODO : force this value in an init() method
    verbose::Bool
end
# Keyword constructor for `ColCutGenConquer`; see the struct docstring for the
# meaning of each parameter.
function ColCutGenConquer(;
    colgen = ColumnGeneration(),
    primal_heuristics = [ParamRestrictedMasterHeuristic()],
    before_cutgen_user_algorithm = nothing,
    node_finalizer = nothing,
    preprocess = nothing,
    cutgen = CutCallbacks(),
    max_nb_cut_rounds = 3,
    opt_atol = AlgoAPI.default_opt_atol(),
    opt_rtol = AlgoAPI.default_opt_rtol(),
    verbose = true
)
    return ColCutGenConquer(
        colgen, primal_heuristics, before_cutgen_user_algorithm, node_finalizer,
        preprocess, cutgen, max_nb_cut_rounds, opt_atol, opt_rtol, verbose
    )
end
# ColCutGenConquer does not use any storage unit for the moment, therefore
# get_units_usage() is not defined for i
# Declares every sub-algorithm the conquer may run, with the model it runs on.
function get_child_algorithms(algo::ColCutGenConquer, reform::Reformulation)
    children = Dict(
        "colgen" => (algo.colgen, reform),
        "cutgen" => (algo.cutgen, getmaster(reform))
    )
    # Optional sub-algorithms are registered only when configured.
    optional = (
        "preprocess" => algo.preprocess,
        "node_finalizer" => algo.node_finalizer,
        "before_cutgen_user" => algo.before_cutgen_user_algorithm,
    )
    for (key, alg) in optional
        isnothing(alg) || (children[key] = (alg, reform))
    end
    for heur in algo.primal_heuristics
        children["heuristic_$(heur.name)"] = (heur.algorithm, reform)
    end
    return children
end
# Execution context of the column-and-cut-generation conquer; currently it only
# carries the algorithm parameters.
struct ColCutGenContext
    params::ColCutGenConquer
end

# Returns the context type to instantiate for the given conquer algorithm.
function type_of_context(::ColCutGenConquer)
    return ColCutGenContext
end

# Builds the context; `reform` and `input` are unused for the base context.
function new_context(::Type{ColCutGenContext}, algo::ColCutGenConquer, reform, input)
    return ColCutGenContext(algo)
end
"""
Runs a round of cut generation using the cut generation algorithm configured in
the conquer parameters (`ctx.params.cutgen`).
Returns `true` if at least one cut is separated; `false` otherwise.
"""
function run_cutgen!(ctx::ColCutGenContext, env, reform, sol)
    # BUGFIX: use the configured cut generation algorithm (registered in
    # `get_child_algorithms`) instead of always instantiating a fresh
    # `CutCallbacks()`, which silently ignored `ColCutGenConquer.cutgen`.
    cutcb_output = run!(ctx.params.cutgen, env, getmaster(reform), CutCallbacksInput(sol))
    cuts_were_added = cutcb_output.nb_cuts_added + cutcb_output.nb_essential_cuts_added > 0
    return cuts_were_added
end
"""
Runs a column generation algorithm and updates the optimization state of the node with
the result of the column generation.
Returns `false` if the node is infeasible, subsolver time limit is reached, or node gap is closed;
`true` if the conquer algorithm continues.
"""
function run_colgen!(ctx::ColCutGenContext, env, reform, input, conquer_output)
    colgen_output = run!(ctx.params.colgen, env, reform, input)
    update!(conquer_output, colgen_output)
    status = getterminationstatus(conquer_output)
    stop = status == INFEASIBLE || status == TIME_LIMIT ||
        ip_gap_closed(conquer_output, atol = ctx.params.opt_atol, rtol = ctx.params.opt_rtol)
    return !stop
end
# Runs the user-provided algorithm scheduled before cut generation and returns
# whether it changed the master problem.
function run_before_cutgen_user_algo!(
    ::ColCutGenContext, before_cutgen_user_algo, env, reform, conquer_output
)
    # Record the state before a "manager" algorithm so it can be rolled back.
    if ismanager(before_cutgen_user_algo.algorithm)
        records = create_records(reform)
    end
    changed = run!(before_cutgen_user_algo.algorithm, env, reform, conquer_output)
    if ismanager(before_cutgen_user_algo.algorithm)
        # NOTE(review): `input` is not defined in this function's scope -- this
        # line throws `UndefVarError` whenever the algorithm is a manager. The
        # units to restore should probably be passed in as an argument.
        restore_from_records!(input.units_to_restore, records)
    end
    return changed
end
"""
Runs several rounds of column and cut generation.
Returns `false` if the column generation returns `false` or time limit is reached.
Returns `true` if the conquer algorithm continues.
"""
function run_colcutgen!(ctx::ColCutGenContext, env, reform, input, conquer_output)
    nb_cut_rounds = 0
    run_conquer = true
    master_changed = true # stores value returned by the algorithm called before cut gen.
    cuts_were_added = true # stores value returned by cut gen.
    # Loop until neither the cut generator nor the user algorithm changed the master.
    while run_conquer && (cuts_were_added || master_changed)
        run_conquer = run_colgen!(ctx, env, reform, input, conquer_output)
        if !run_conquer
            return false
        end
        master_changed = false
        before_cutgen_user_algorithm = ctx.params.before_cutgen_user_algorithm
        if !isnothing(before_cutgen_user_algorithm)
            master_changed = run_before_cutgen_user_algo!(
                ctx, before_cutgen_user_algorithm, env, reform, conquer_output
            )
        end
        sol = get_best_lp_primal_sol(conquer_output)
        cuts_were_added = false
        if !isnothing(sol)
            # Separate cuts only while the round budget is not exhausted.
            if run_conquer && nb_cut_rounds < ctx.params.max_nb_cut_rounds
                cuts_were_added = run_cutgen!(ctx, env, reform, sol)
                nb_cut_rounds += 1
            end
        else
            @warn "Column generation did not produce an LP primal solution. Skip cut generation."
        end
        time_limit_reached!(conquer_output, env) && return false
    end
    return true
end
# Selects the primal heuristics applicable at `node_depth` and orders them by
# decreasing priority (root priority at the root node, non-root otherwise).
function get_heuristics_to_run(ctx::ColCutGenContext, node_depth)
    applicable = [
        h for h in ctx.params.primal_heuristics
        if node_depth <= h.max_depth #= & frequency () TODO define a function here =#
    ]
    priority(h) = node_depth == 0 ? h.root_priority : h.nonroot_priority
    return sort!(applicable, by = priority, rev = true)
end
# run_heuristics!
# Runs each heuristic in turn, collecting primal solutions and dual bound updates
# into `conquer_output`. Returns `false` when the gap is closed or the time limit
# is reached; `true` if the conquer algorithm should continue.
function run_heuristics!(ctx::ColCutGenContext, heuristics, env, reform, input, conquer_output)
    for heuristic in heuristics
        # TODO: check time limit of Coluna

        if ip_gap_closed(conquer_output, atol = ctx.params.opt_atol, rtol = ctx.params.opt_rtol)
            return false
        end

        # Record the state before a "manager" heuristic so it can be rolled back.
        if ismanager(heuristic.algorithm)
            records = create_records(reform)
        end

        prev_primal_bound = get_ip_primal_bound(conquer_output)
        heuristic_output = run!(heuristic.algorithm, env, reform, conquer_output)
        add_ip_primal_sols!(conquer_output, get_ip_primal_sols(heuristic_output)...)
        update_ip_dual_bound!(conquer_output, get_ip_dual_bound(heuristic_output))
        if ctx.params.verbose
            curr_primal_bound = get_ip_primal_bound(conquer_output)
            if curr_primal_bound != prev_primal_bound
                @info "Heuristic $(heuristic.name) found improving primal solution with value $(curr_primal_bound.value)"
            end
        end
        if ismanager(heuristic.algorithm)
            restore_from_records!(input.units_to_restore, records)
        end

        if getterminationstatus(conquer_output) == TIME_LIMIT ||
            ip_gap_closed(conquer_output, atol = ctx.params.opt_atol, rtol = ctx.params.opt_rtol)
            return false
        end
    end
    return true
end
"""
Runs the preprocessing algorithm.
Returns `true` if conquer algorithm should continue;
`false` otherwise (in the case where preprocessing finds the formulation infeasible).
"""
function run_preprocessing!(::ColCutGenContext, preprocess_algo, env, reform, conquer_output)
    output = run!(preprocess_algo, env, reform, nothing)
    infeasible = isinfeasible(output)
    infeasible && setterminationstatus!(conquer_output, INFEASIBLE)
    return !infeasible
end
# Runs the node finalizer when the node is deep enough. Solutions returned by the
# finalizer are accepted directly when it conquered the node (OPTIMAL/INFEASIBLE);
# otherwise each solution is only kept if it violates no essential cut.
function run_node_finalizer!(::ColCutGenContext, node_finalizer, env, reform, node_depth, conquer_output)
    if node_depth >= node_finalizer.min_depth #= TODO: put in a function =#
        # Record the state before a "manager" node finalizer so it can be rolled back.
        if ismanager(node_finalizer.algorithm)
            records = create_records(reform)
        end

        nf_output = run!(node_finalizer.algorithm, env, reform, conquer_output)
        status = getterminationstatus(nf_output)
        ip_primal_sols = get_ip_primal_sols(nf_output)

        # if the node has been conquered by the node finalizer
        if status in (OPTIMAL, INFEASIBLE)
            # set the ip solutions found without checking the cuts and finish
            if !isnothing(ip_primal_sols) && length(ip_primal_sols) > 0
                for sol in sort(ip_primal_sols)
                    update_ip_primal_sol!(conquer_output, sol)
                end
            end

            # make sure that the gap is closed for the current node
            dual_bound = DualBound(reform, getvalue(get_ip_primal_bound(conquer_output)))
            update_lp_dual_bound!(conquer_output, dual_bound)
            update_ip_dual_bound!(conquer_output, dual_bound)
        else
            if !isnothing(ip_primal_sols) && length(ip_primal_sols) > 0
                # we start with worst solution to add all improving solutions
                for sol in sort(ip_primal_sols)
                    cutgen = CutCallbacks(call_robust_facultative = false)
                    # TODO by Artur : Node finalizer should ensure itselves that the returned solution is feasible
                    # NOTE by Guillaume: How can we do that ? I'm not sure it's a good idea to couple NF and cut gen.
                    cutcb_output = run!(cutgen, env, getmaster(reform), CutCallbacksInput(sol))
                    if cutcb_output.nb_cuts_added == 0
                        update_ip_primal_sol!(conquer_output, sol)
                    end
                end
            end
        end

        if ismanager(node_finalizer.algorithm)
            # NOTE(review): `input` is not defined in this function's scope -- this
            # line throws `UndefVarError` whenever the node finalizer is a manager.
            # The units to restore should probably be passed in as an argument.
            restore_from_records!(input.units_to_restore, records)
        end
    end
    return true
end
# Orchestrates the whole conquer: preprocessing, column-and-cut generation,
# primal heuristics, then the node finalizer, with time-limit checks in between.
function run_colcutgen_conquer!(ctx::ColCutGenContext, env, reform, input)
    run_conquer = true # certainly useless

    # We initialize the output of the conquer algorithm using the input given by the algorithm
    # that calls the conquer strategy. This output will be updated by the conquer algorithm.
    conquer_output = OptimizationState(
        getmaster(reform);
        global_primal_bound_handler = get_global_primal_handler(input)
    )

    time_limit_reached!(conquer_output, env) && return conquer_output

    if !isnothing(ctx.params.preprocess)
        run_conquer = run_preprocessing!(ctx, ctx.params.preprocess, env, reform, conquer_output)
        !run_conquer && return conquer_output
    end

    time_limit_reached!(conquer_output, env) && return conquer_output

    run_conquer = run_colcutgen!(ctx, env, reform, input, conquer_output)
    !run_conquer && return conquer_output

    time_limit_reached!(conquer_output, env) && return conquer_output

    heuristics_to_run = get_heuristics_to_run(ctx, get_node_depth(input))
    run_conquer = run_heuristics!(ctx, heuristics_to_run, env, reform, input, conquer_output)
    !run_conquer && return conquer_output

    time_limit_reached!(conquer_output, env) && return conquer_output

    # if the gap is still unclosed, try to run the node finalizer
    node_finalizer = ctx.params.node_finalizer
    if !ip_gap_closed(conquer_output, atol = ctx.params.opt_atol, rtol = ctx.params.opt_rtol) && !isnothing(node_finalizer)
        run_node_finalizer!(ctx, node_finalizer, env, reform, get_node_depth(input), conquer_output)
    end

    time_limit_reached!(conquer_output, env) && return conquer_output

    # Final status: OPTIMAL when the gap is closed; otherwise keep TIME_LIMIT /
    # INFEASIBLE, or fall back to OTHER_LIMIT.
    if ip_gap_closed(conquer_output, atol = ctx.params.opt_atol, rtol = ctx.params.opt_rtol)
        setterminationstatus!(conquer_output, OPTIMAL)
    elseif getterminationstatus(conquer_output) != TIME_LIMIT && getterminationstatus(conquer_output) != INFEASIBLE
        setterminationstatus!(conquer_output, OTHER_LIMIT)
    end
    return conquer_output
end
# Entry point of the column-and-cut-generation conquer algorithm.
function run!(algo::ColCutGenConquer, env::Env, reform::Reformulation, input::AbstractConquerInput)
    context = new_context(type_of_context(algo), algo, reform, input)
    return run_colcutgen_conquer!(context, env, reform, input)
end
####################################################################
# RestrMasterLPConquer
####################################################################
"""
    RestrMasterLPConquer(
        masterlpalgo = SolveLpForm(
            get_ip_primal_sol = true
        )
    )

Conquer algorithm that solves the master problem using a linear programming solver.
"""
@with_kw struct RestrMasterLPConquer <: AbstractConquerAlgorithm
    masterlpalgo::SolveLpForm = SolveLpForm(
        get_ip_primal_sol = true
    )
end

# RestrMasterLPConquer does not use any unit, therefore get_units_usage() is not defined for it

# The LP sub-algorithm runs on the master formulation only.
function get_child_algorithms(algo::RestrMasterLPConquer, reform::Reformulation)
    return Dict("restr_master_lp" => (algo.masterlpalgo, getmaster(reform)))
end
# Solves the restricted master LP and reports the result as the conquer output.
function run!(algo::RestrMasterLPConquer, env::Env, reform::Reformulation, input::AbstractConquerInput)
    master = getmaster(reform)
    conquer_output = OptimizationState(
        master;
        global_primal_bound_handler = get_global_primal_handler(input)
    )
    masterlp_state = run!(algo.masterlpalgo, env, master, conquer_output)
    update!(conquer_output, masterlp_state)
    status = ip_gap_closed(masterlp_state) ? OPTIMAL : OTHER_LIMIT
    setterminationstatus!(conquer_output, status)
    return conquer_output
end
| Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 4536 | # The storage API is easy to use when you have one model.
# However, when you optimize a problem with Coluna, you generally have one reformulation that
# maintains several formulations.
# Each reformulation and formulation has a storage that has storage units.
# It becomes very hard to know which storage a record should restore.
# Therefore, we built this API on top of the storage API.
# It performs the same operations than the storage API but maintain additional information
# when you create records to easily restore the good storage unit.
# For each storage unit of Coluna, you must define a storage unit key.
# In nodes, we don't know the type of record we store.
# We thus use the AbstractStorageUnitKey and following methods for type inference.
# Type-inference-friendly key identifying a storage unit inside a `Records`.
abstract type AbstractStorageUnitKey end

# Maps a storage-unit type to its singleton key; must be specialized per unit.
key_from_storage_unit_type(T::Type{<:AbstractRecordUnit}) =
    error("key_from_storage_unit_type(::Type{$(T)}) not implemented.")
# Maps a key back to the concrete record type; must be specialized per key.
record_type_from_key(k::AbstractStorageUnitKey) =
    error("record_type_from_key(::$(typeof(k))) not implemented.")
############################################################################################
# create_records built on top of ClB.create_record
############################################################################################
"""
Collection of records indexed by model uid then storage-unit key; produced by
`create_records` and consumed by `restore_from_records!`.
"""
struct Records
    records_by_model_id::Dict{Int, Dict{AbstractStorageUnitKey, AbstractRecord}}

    Records() = new(Dict{Int, Dict{AbstractStorageUnitKey, AbstractRecord}}())
end
# Registers `record` (created from a storage unit of type `storage_unit_type`
# attached to `model`) in `r`, indexed by model uid and storage-unit key.
function _add_rec!(
    r::Records, model::AbstractModel, storage_unit_type::Type{<:AbstractRecordUnit}, record::AbstractRecord
)
    model_id = getuid(model)
    records_of_model = get!(r.records_by_model_id, model_id, Dict{AbstractStorageUnitKey, AbstractRecord}())
    key = key_from_storage_unit_type(storage_unit_type)
    # BUGFIX: the duplicate check must use the storage-unit *key*: the inner
    # dictionary is keyed by `AbstractStorageUnitKey`, so the previous
    # `haskey(..., storage_unit_type)` test could never fire.
    if haskey(records_of_model, key)
        @error "Already added record for model $(model_id) and storage unit $(storage_unit_type).
        Going to replace it."
    end
    records_of_model[key] = record
    return
end
# Fetches the record stored for (`model`, `key`); returns `nothing` when absent.
function _get_rec(r::Records, model::AbstractModel, key::AbstractStorageUnitKey)
    records_of_model = get(r.records_by_model_id, getuid(model), nothing)
    isnothing(records_of_model) && return nothing
    haskey(records_of_model, key) || return nothing
    RT = record_type_from_key(key)
    # Note that the `::RT` assertion below is necessary for type inference.
    return records_of_model[key]::RT
end
# Creates one record per storage unit attached to `model` and registers each of
# them in `records`.
function _create_records!(records::Records, model)
    storage = getstorage(model)
    for unit_type in Iterators.keys(storage.units)
        _add_rec!(records, model, unit_type, create_record(storage, unit_type))
    end
    return
end

"""
    create_records(reformulation) -> Records

Methods to create records of all storage units of a reformulation and the formulations
handled by the reformulation.
"""
function create_records(reform::Reformulation)
    records = Records()
    _create_records!(records, reform)
    _create_records!(records, getmaster(reform))
    for sp_form in Iterators.values(get_dw_pricing_sps(reform))
        _create_records!(records, sp_form)
    end
    for sp_form in Iterators.values(get_benders_sep_sps(reform))
        _create_records!(records, sp_form)
    end
    return records
end
############################################################################################
# restore_from_records! built on top of ClB.restore_from_record!
############################################################################################
"""
Store a set of storage unit type associated to the model.
Used to indicate what storage units from what models we want to restore.
"""
struct UnitsUsage
    units_used::Vector{Tuple{AbstractModel,DataType}}
end

# Empty usage set by default.
UnitsUsage() = UnitsUsage(Vector{Tuple{AbstractModel,DataType}}())
"""
    restore_from_records!(units_used::UnitsUsage, records::Records)

Method to restore storage units from reformulations and formulations given a set of records
stored in an object of type `Records`.
"""
function restore_from_records!(units_usage::UnitsUsage, records::Records)
    for (model, unit_type) in units_usage.units_used
        rec = _get_rec(records, model, key_from_storage_unit_type(unit_type))
        isnothing(rec) || restore_from_record!(getstorage(model), rec)
    end
    return
end
| Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 12921 | """
StaticVarState
Used in formulation records
"""
struct StaticVarState
cost::Float64
lb::Float64
ub::Float64
end
function apply_state!(form::Formulation, var::Variable, var_state::StaticVarState)
if getcurlb(form, var) != var_state.lb
setcurlb!(form, var, var_state.lb)
end
if getcurub(form, var) != var_state.ub
setcurub!(form, var, var_state.ub)
end
if getcurcost(form, var) != var_state.cost
setcurcost!(form, var, var_state.cost)
end
return
end
"""
ConstrState
Used in formulation records
"""
struct ConstrState
rhs::Float64
end
function apply_state!(form::Formulation, constr::Constraint, constr_state::ConstrState)
if getcurrhs(form, constr) != constr_state.rhs
setcurrhs!(form, constr, constr_state.rhs)
end
return
end
"""
    MasterBranchConstrsUnit

Unit for master branching constraints.
Can be restored using MasterBranchConstrsRecord.
"""
struct MasterBranchConstrsUnit <: AbstractRecordUnit end

# Record holding, for every active explicit master branching constraint, its state (rhs).
mutable struct MasterBranchConstrsRecord <: AbstractRecord
    constrs::Dict{ConstrId,ConstrState}
end

# Key linking the storage unit to its record type (see the records API).
struct MasterBranchConstrsKey <: AbstractStorageUnitKey end

key_from_storage_unit_type(::Type{MasterBranchConstrsUnit}) = MasterBranchConstrsKey()
record_type_from_key(::MasterBranchConstrsKey) = MasterBranchConstrsRecord

ClB.storage_unit(::Type{MasterBranchConstrsUnit}, _) = MasterBranchConstrsUnit()

# Snapshots the rhs of every active & explicit master branching constraint.
function ClB.record(::Type{MasterBranchConstrsRecord}, id::Int, form::Formulation, unit::MasterBranchConstrsUnit)
    @logmsg LogLevel(-2) "Storing branching constraints"
    record = MasterBranchConstrsRecord(Dict{ConstrId,ConstrState}())
    for (id, constr) in getconstrs(form)
        if getduty(id) <= AbstractMasterBranchingConstr &&
            iscuractive(form, constr) && isexplicit(form, constr)
            constrstate = ConstrState(getcurrhs(form, constr))
            record.constrs[id] = constrstate
        end
    end
    return record
end

ClB.record_type(::Type{MasterBranchConstrsUnit}) = MasterBranchConstrsRecord
ClB.storage_unit_type(::Type{MasterBranchConstrsRecord}) = MasterBranchConstrsUnit

# Re-activates, updates, or deactivates branching constraints so the formulation
# matches the snapshot stored in `record`.
function ClB.restore_from_record!(
    form::Formulation, ::MasterBranchConstrsUnit, record::MasterBranchConstrsRecord
)
    @logmsg LogLevel(-2) "Restoring branching constraints"
    for (id, constr) in getconstrs(form)
        if getduty(id) <= AbstractMasterBranchingConstr && isexplicit(form, constr)
            @logmsg LogLevel(-4) "Checking " getname(form, constr)
            if haskey(record.constrs, id)
                if !iscuractive(form, constr)
                    @logmsg LogLevel(-2) string("Activating branching constraint", getname(form, constr))
                    activate!(form, constr)
                else
                    @logmsg LogLevel(-2) string("Leaving branching constraint", getname(form, constr))
                end
                @logmsg LogLevel(-4) "Updating data"
                apply_state!(form, constr, record.constrs[id])
            else
                if iscuractive(form, constr)
                    @logmsg LogLevel(-2) string("Deactivating branching constraint", getname(form, constr))
                    deactivate!(form, constr)
                end
            end
        end
    end
end
"""
    MasterColumnsUnit

Unit for the master columns of a formulation.
Can be restored using a MasterColumnsRecord.
"""
struct MasterColumnsUnit <: AbstractRecordUnit end

# Record holding the ids of the master columns active when the record was taken.
mutable struct MasterColumnsRecord <: AbstractRecord
    active_cols::Set{VarId}
    MasterColumnsRecord() = new(Set{VarId}())
end
# NOTE(review): assumes `Base.push!` is explicitly imported in this module;
# otherwise this definition would not extend `push!` -- confirm against the
# module's imports.
push!(record::MasterColumnsRecord, id::VarId) = push!(record.active_cols, id)

struct MasterColumnsKey <: AbstractStorageUnitKey end

key_from_storage_unit_type(::Type{MasterColumnsUnit}) = MasterColumnsKey()
record_type_from_key(::MasterColumnsKey) = MasterColumnsRecord

ClB.storage_unit(::Type{MasterColumnsUnit}, _) = MasterColumnsUnit()

# Snapshots the ids of all active & explicit master columns.
function ClB.record(::Type{MasterColumnsRecord}, id::Int, form::Formulation, unit::MasterColumnsUnit)
    record = MasterColumnsRecord()
    for (id, var) in getvars(form)
        if getduty(id) <= MasterCol && isexplicit(form, var) && iscuractive(form, var)
            push!(record, id)
        end
    end
    return record
end

ClB.record_type(::Type{MasterColumnsUnit}) = MasterColumnsRecord
ClB.storage_unit_type(::Type{MasterColumnsRecord}) = MasterColumnsUnit
# Restores the set of active master columns: activates the columns listed in the
# record and deactivates every other explicit master column.
function ClB.restore_from_record!(
    form::Formulation, ::MasterColumnsUnit, state::MasterColumnsRecord
)
    for (var_id, var) in getvars(form)
        getduty(var_id) <= MasterCol && isexplicit(form, var) || continue
        should_be_active = var_id in state.active_cols
        if should_be_active && !iscuractive(form, var)
            activate!(form, var)
        elseif !should_be_active && iscuractive(form, var)
            deactivate!(form, var)
        end
    end
    return
end
"""
    MasterCutsUnit

Unit for cutting planes of a formulation.
Can be restored using a MasterCutsRecord.
"""
struct MasterCutsUnit <: AbstractRecordUnit end

MasterCutsUnit(::Formulation) = MasterCutsUnit()

# Record holding, for every active explicit master cut, its state (rhs).
mutable struct MasterCutsRecord <: AbstractRecord
    cuts::Dict{ConstrId,ConstrState}
end

struct MasterCutsKey <: AbstractStorageUnitKey end

key_from_storage_unit_type(::Type{MasterCutsUnit}) = MasterCutsKey()
record_type_from_key(::MasterCutsKey) = MasterCutsRecord

ClB.storage_unit(::Type{MasterCutsUnit}, _) = MasterCutsUnit()

# Snapshots the rhs of every active & explicit master cut.
function ClB.record(::Type{MasterCutsRecord}, id::Int, form::Formulation, unit::MasterCutsUnit)
    @logmsg LogLevel(-2) "Storing master cuts"
    record = MasterCutsRecord(Dict{ConstrId,ConstrState}())
    for (id, constr) in getconstrs(form)
        if getduty(id) <= AbstractMasterCutConstr &&
            iscuractive(form, constr) && isexplicit(form, constr)
            constrstate = ConstrState(getcurrhs(form, constr))
            record.cuts[id] = constrstate
        end
    end
    return record
end

ClB.record_type(::Type{MasterCutsUnit}) = MasterCutsRecord
ClB.storage_unit_type(::Type{MasterCutsRecord}) = MasterCutsUnit

# Re-activates, updates, or deactivates master cuts so the formulation matches
# the snapshot stored in `state`.
function ClB.restore_from_record!(
    form::Formulation, ::MasterCutsUnit, state::MasterCutsRecord
)
    @logmsg LogLevel(-2) "Storing master cuts"
    for (id, constr) in getconstrs(form)
        if getduty(id) <= AbstractMasterCutConstr && isexplicit(form, constr)
            @logmsg LogLevel(-4) "Checking " getname(form, constr)
            if haskey(state.cuts, id)
                if !iscuractive(form, constr)
                    @logmsg LogLevel(-4) string("Activating cut", getname(form, constr))
                    activate!(form, constr)
                end
                @logmsg LogLevel(-4) "Updating data"
                apply_state!(form, constr, state.cuts[id])
            else
                if iscuractive(form, constr)
                    @logmsg LogLevel(-4) string("Deactivating cut", getname(form, constr))
                    deactivate!(form, constr)
                end
            end
        end
    end
end
"""
    StaticVarConstrUnit

Unit for static variables and constraints of a formulation.
Can be restored using a StaticVarConstrRecord.
"""
struct StaticVarConstrUnit <: AbstractRecordUnit end

StaticVarConstrUnit(::Formulation) = StaticVarConstrUnit()

# Record holding the state of every active explicit static constraint (rhs) and
# variable (cost & bounds).
mutable struct StaticVarConstrRecord <: AbstractRecord
    constrs::Dict{ConstrId,ConstrState}
    vars::Dict{VarId,StaticVarState}
end

# TO DO: we need to keep here only the difference with the initial data

struct StaticVarConstrKey <: AbstractStorageUnitKey end

key_from_storage_unit_type(::Type{StaticVarConstrUnit}) = StaticVarConstrKey()
record_type_from_key(::StaticVarConstrKey) = StaticVarConstrRecord

ClB.storage_unit(::Type{StaticVarConstrUnit}, _) = StaticVarConstrUnit()

# function Base.show(io::IO, record::StaticVarConstrRecord)
#     print(io, "[vars:")
#     for (id, var) in record.vars
#         print(io, " ", MathProg.getuid(id))
#     end
#     print(io, ", constrs:")
#     for (id, constr) in record.constrs
#         print(io, " ", MathProg.getuid(id))
#     end
#     print(io, "]")
# end

# Snapshots the state of all active & explicit static variables and constraints.
function ClB.record(::Type{StaticVarConstrRecord}, id::Int, form::Formulation, unit::StaticVarConstrUnit)
    @logmsg LogLevel(-2) string("Storing static vars and consts")
    record = StaticVarConstrRecord(Dict{ConstrId,ConstrState}(), Dict{VarId,StaticVarState}())
    for (id, constr) in getconstrs(form)
        if isaStaticDuty(getduty(id)) && iscuractive(form, constr) && isexplicit(form, constr)
            constrstate = ConstrState(getcurrhs(form, constr))
            record.constrs[id] = constrstate
        end
    end
    for (id, var) in getvars(form)
        if isaStaticDuty(getduty(id)) && isexplicit(form, var) && iscuractive(form, var)
            varstate = StaticVarState(
                getcurcost(form, var),
                getcurlb(form, var),
                getcurub(form, var)
            )
            record.vars[id] = varstate
        end
    end
    return record
end

ClB.record_type(::Type{StaticVarConstrUnit}) = StaticVarConstrRecord
ClB.storage_unit_type(::Type{StaticVarConstrRecord}) = StaticVarConstrUnit
# Re-activates, updates, or deactivates static variables and constraints so the
# formulation matches the snapshot stored in `record`.
function ClB.restore_from_record!(
    form::Formulation, ::StaticVarConstrUnit, record::StaticVarConstrRecord
)
    @logmsg LogLevel(-2) "Restoring static vars and consts"
    for (id, constr) in getconstrs(form)
        if isaStaticDuty(getduty(id)) && isexplicit(form, constr)
            @logmsg LogLevel(-4) "Checking " getname(form, constr)
            if haskey(record.constrs, id)
                if !iscuractive(form, constr)
                    @logmsg LogLevel(-2) string("Activating constraint", getname(form, constr))
                    activate!(form, constr)
                end
                @logmsg LogLevel(-4) "Updating data"
                apply_state!(form, constr, record.constrs[id])
            else
                if iscuractive(form, constr)
                    @logmsg LogLevel(-2) string("Deactivating constraint", getname(form, constr))
                    deactivate!(form, constr)
                end
            end
        end
    end
    for (id, var) in getvars(form)
        if isaStaticDuty(getduty(id)) && isexplicit(form, var)
            @logmsg LogLevel(-4) "Checking " getname(form, var)
            if haskey(record.vars, id)
                if !iscuractive(form, var)
                    @logmsg LogLevel(-4) string("Activating variable", getname(form, var))
                    activate!(form, var)
                end
                @logmsg LogLevel(-4) "Updating data"
                apply_state!(form, var, record.vars[id])
            else
                if iscuractive(form, var)
                    @logmsg LogLevel(-4) string("Deactivating variable", getname(form, var))
                    deactivate!(form, var)
                end
            end
        end
    end
end
"""
PartialSolutionUnit
Unit for current the partial solution of a formulation.
Can be restored using a PartialSolutionRecord.
"""
struct PartialSolutionUnit <: AbstractRecordUnit end
PartialSolutionUnit(::Formulation) = PartialSolutionUnit()
mutable struct PartialSolutionRecord <: AbstractRecord
partial_solution::Dict{VarId, Float64}
PartialSolutionRecord(form::Formulation) = new(copy(MathProg.getpartialsol(form)))
end
# Key identifying the partial-solution storage unit in the storage dictionary.
struct PartialSolutionKey <: AbstractStorageUnitKey end
key_from_storage_unit_type(::Type{PartialSolutionUnit}) = PartialSolutionKey()
record_type_from_key(::PartialSolutionKey) = PartialSolutionRecord
# The unit is stateless, so the constructor ignores the model argument.
ClB.storage_unit(::Type{PartialSolutionUnit}, _) = PartialSolutionUnit()
# Take a snapshot of the formulation's current partial solution.
function ClB.record(::Type{PartialSolutionRecord}, id::Int, form::Formulation, unit::PartialSolutionUnit)
    return PartialSolutionRecord(form)
end
# Storage API glue: a `PartialSolutionUnit` is recorded into / restored from a `PartialSolutionRecord`.
ClB.record_type(::Type{PartialSolutionUnit}) = PartialSolutionRecord
ClB.storage_unit_type(::Type{PartialSolutionRecord}) = PartialSolutionUnit
# Restore the formulation's partial solution to the values saved in `record`.
# Only the variables whose value differs between the current partial solution and the
# record are touched; a variable missing from the record is reset towards 0.0.
function ClB.restore_from_record!(
    form::Formulation, ::PartialSolutionUnit, record::PartialSolutionRecord
)
    @logmsg LogLevel(-2) "Restoring partial solution"
    current_sol = MathProg.getpartialsol(form)
    recorded_sol = record.partial_solution
    changes = Dict{VarId, Float64}()
    # Variables currently in the partial solution whose value must change.
    for (var_id, current_value) in current_sol
        recorded_value = get(recorded_sol, var_id, 0.0)
        current_value != recorded_value && (changes[var_id] = recorded_value)
    end
    # Variables present only in the record.
    for (var_id, recorded_value) in recorded_sol
        haskey(current_sol, var_id) || (changes[var_id] = recorded_value)
    end
    for (var_id, new_value) in changes
        @logmsg LogLevel(-4) string("Changing value of ", getname(form, var_id), " in partial solution to ", new_value)
        MathProg.set_value_in_partial_solution!(form, var_id, new_value)
    end
end
| Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 8029 | ############################################################################################
# Algorithm API
############################################################################################
# TODO: remove this method.
# We currently need it because we give to the parent algorithm the responsability of recording
# and restoring the state of a formulation when a child algorithm may divide the calculation
# flow and thus making invalid the formulation for the parent algorithm at the end of the
# child execution.
# If we give the responsability of recording/restoring to the child, we won't need this method
# anymore and we'll get rid of the concept of manager & worker algorithm.
ismanager(algo::AlgoAPI.AbstractAlgorithm) = false
"""
get_child_algorithms(algo, model) -> Dict{String, Tuple{AbstractAlgorithm, AbstractModel}}
Every algorithm should communicate its child algorithms and the model to which
each child algorithm is applied.
It should returns a dictionary where the keys are the names of the child algorithms and
the values are the algorithm parameters and the model to which the algorithm is applied.
By default, `get_child_algorithms` returns an empty dictionary.
"""
get_child_algorithms(::AlgoAPI.AbstractAlgorithm, ::AbstractModel) = Dict{String, Tuple{AlgoAPI.AbstractAlgorithm, AbstractModel}}()
"""
get_units_usage(algo, model) -> Tuple{AbstractModel, UnitType, UnitPermission}[]
Every algorithm should communicate the storage units it uses (so that these units
are created in the beginning) and the usage mode (read only or read-and-write). Usage mode is needed for
in order to restore units before running a worker algorithm.
"""
get_units_usage(::AlgoAPI.AbstractAlgorithm, ::AbstractModel) = Tuple{AlgoAPI.AbstractModel, UnitType, UnitPermission}[]
############################################################################################
# Conquer Algorithm API
############################################################################################
"""
    AbstractConquerInput

Input of a conquer algorithm used by the tree search algorithm.
Contains the node in the search tree and the collection of units to restore
before running the conquer algorithm. This collection of units is passed
in the input so that it is not recomputed each time the conquer algorithm runs.
"""
abstract type AbstractConquerInput end
# Interface every concrete conquer input must implement.
@mustimplement "ConquerInput" get_node(i::AbstractConquerInput) = nothing
@mustimplement "ConquerInput" get_units_to_restore(i::AbstractConquerInput) = nothing
@mustimplement "ConquerInput" run_conquer(i::AbstractConquerInput) = nothing
@mustimplement "ConquerInput" get_global_primal_handler(i::AbstractConquerInput) = nothing
@mustimplement "ConquerInput" get_conquer_input_ip_dual_bound(i::AbstractConquerInput) = nothing
"""
AbstractConquerAlgorithm
This algorithm type is used by the tree search algorithm to update the incumbents and the formulation.
For the moment, a conquer algorithm can be run only on reformulation.
A conquer algorithm should restore records of storage units using `restore_from_records!(conquer_input)`
- each time it runs in the beginning
- each time after calling a child manager algorithm
"""
abstract type AbstractConquerAlgorithm <: AlgoAPI.AbstractAlgorithm end
# conquer algorithms are always manager algorithms (they manage storing and restoring units)
ismanager(algo::AbstractConquerAlgorithm) = true
@mustimplement "ConquerAlgorithm" run!(::AbstractConquerAlgorithm, ::Env, ::Reformulation, ::AbstractConquerInput) = nothing
############################################################################################
# Optimization Algorithm API
############################################################################################
"""
    AbstractOptimizationAlgorithm

This type of algorithm is used to "bound" a model, i.e. to improve primal
and dual bounds of the model. Solving to optimality is a special case of "bounding".
The input of such an algorithm should be of type OptimizationState.
The output of such an algorithm should be of type OptimizationState.
"""
abstract type AbstractOptimizationAlgorithm <: AlgoAPI.AbstractAlgorithm end
# Recursively collects the storage units to restore for an algorithm and all of its child
# worker algorithms. Child manager algorithms are skipped: they restore their units themselves.
function _collect_units_to_restore!(
    global_units_usage::UnitsUsage, algo::AlgoAPI.AbstractAlgorithm, model::AbstractModel
)
    # The permission (third tuple element) is irrelevant here; only (model, unit) pairs matter.
    for (unit_model, unit_type, _) in get_units_usage(algo, model)
        push!(global_units_usage.units_used, (unit_model, unit_type))
    end
    for (child_algo, child_model) in values(get_child_algorithms(algo, model))
        ismanager(child_algo) || _collect_units_to_restore!(global_units_usage, child_algo, child_model)
    end
end
# Entry point: build the `UnitsUsage` listing every storage unit to restore for `algo`
# and all of its non-manager descendants.
function collect_units_to_restore!(algo::AlgoAPI.AbstractAlgorithm, model::AbstractModel)
    usage = UnitsUsage()
    _collect_units_to_restore!(usage, algo, model)
    return usage
end
# Recursively collects the storage units to create for an algorithm and all of its child
# algorithms. Only used by `initialize_storage_units!` below.
function collect_units_to_create!(
    units_to_create::Dict{AbstractModel,Set{UnitType}}, algo::AlgoAPI.AbstractAlgorithm, model::AbstractModel
)
    for (unit_model, unit_type, _) in get_units_usage(algo, model)
        # Lazily create the per-model set of unit types, then register this unit.
        unit_set = get!(units_to_create, unit_model) do
            Set{UnitType}()
        end
        push!(unit_set, unit_type)
    end
    for (child_algo, child_model) in values(get_child_algorithms(algo, model))
        collect_units_to_create!(units_to_create, child_algo, child_model)
    end
    return
end
# Creates a `RecordUnitManager` for every storage unit used by `algo` (and its children)
# in the storage dictionary of each concerned model of the reformulation.
function initialize_storage_units!(reform::Reformulation, algo::AbstractOptimizationAlgorithm)
    units_to_create = Dict{AbstractModel,Set{UnitType}}()
    collect_units_to_create!(units_to_create, algo, reform)
    for (model, types_of_storage_unit) in units_to_create
        storagedict = model.storage.units
        if storagedict === nothing
            # Bug fix: this message previously referenced an undefined variable `data`
            # (via `getnicename(data)`), which raised an UndefVarError instead of the
            # intended error.
            error(string("Model of type $(typeof(model)) with id $(getuid(model)) ",
                "has no storage unit dictionary.")
            )
        end
        for storage_unit_type in types_of_storage_unit
            storagedict[storage_unit_type] = RecordUnitManager(storage_unit_type, model)
        end
    end
end
############################################################################################
# Routines to check & initialize algorithms before starting the optimization.
############################################################################################
# Recursively checks the consistency of every parameter (struct field) of each child
# algorithm, pushing `(parameter_name, algorithm, value)` tuples for every inconsistency.
function _check_alg_parameters!(inconsistencies, algo, reform::Reformulation)
    # The child algorithm's registration name is unused; only the algorithm and its
    # model matter here. (The previous inner loop variable shadowed it.)
    for (_, (child_algo, model)) in get_child_algorithms(algo, reform)
        for param_name in fieldnames(typeof(child_algo))
            value = getfield(child_algo, param_name)
            consistent_val = check_parameter(child_algo, Val(param_name), value, reform)
            if !consistent_val
                push!(inconsistencies, (param_name, child_algo, value))
            end
        end
        _check_alg_parameters!(inconsistencies, child_algo, model)
    end
    return
end
"""
check_alg_parameters(top_algo, reform) -> Vector{Tuple{Symbol, AbstractAlgorithm, Any}}
Checks the consistency of the parameters of the top algorithm and its children algorithms.
Returns a vector of tuples (name of the parameter, algorithm, value of the parameter) that
lists all the inconsistencies found in the algorithms tree.
"""
function check_alg_parameters(top_algo, reform::Reformulation)
inconsistencies = []
_check_alg_parameters!(inconsistencies, top_algo, reform)
return inconsistencies
end | Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 9846 | """
Coluna.Algorithm.TreeSearchAlgorithm(
presolvealg = nothing,
conqueralg::AbstractConquerAlgorithm = ColCutGenConquer(),
dividealg::AbstractDivideAlgorithm = Branching(),
explorestrategy::AbstractExploreStrategy = DepthFirstStrategy(),
maxnumnodes = 100000,
opennodeslimit = 100,
timelimit = -1, # -1 means no time limit
opt_atol::Float64 = DEF_OPTIMALITY_ATOL,
opt_rtol::Float64 = DEF_OPTIMALITY_RTOL,
branchingtreefile = "",
jsonfile = "",
print_node_info = true
)
This algorithm is a branch and bound that uses a search tree to optimize the reformulation.
At each node in the tree, it applies `conqueralg` to evaluate the node and improve the bounds,
`dividealg` to generate branching constraints, and `explorestrategy`
to select the next node to treat. Optionally, the `presolvealg` is run in the beginning to
preprocess the formulation.
The three main elements of the algorithm are:
- the conquer strategy (`conqueralg`): evaluation of the problem at a node of the Branch-and-Bound tree. Depending on the type of decomposition used ahead of the Branch-and-Bound, you can use either Column Generation (if your problem is decomposed following Dantzig-Wolfe transformation) and/or Cut Generation (for Dantzig-Wolfe and Benders decompositions).
- the branching strategy (`dividealg`): how to create new branches i.e. how to divide the search space
- the explore strategy (`explorestrategy`): the evaluation order of your nodes
Parameters:
- `maxnumnodes` : maximum number of nodes explored by the algorithm
- `opennodeslimit` : maximum number of nodes waiting to be explored
- `timelimit` : time limit in seconds of the algorithm
- `opt_atol` : optimality absolute tolerance (alpha)
- `opt_rtol` : optimality relative tolerance (alpha)
Options:
- `branchingtreefile` : name of the file in which the algorithm writes an overview of the branching tree
- `jsonfile` : name of the file in which the algorithm writes the solution in JSON format
- `print_node_info` : log the tree into the console
**Warning**: if you set a name for the `branchingtreefile` AND the `jsonfile`, the algorithm will only write
in the json file.
"""
struct TreeSearchAlgorithm <: AbstractOptimizationAlgorithm
presolvealg::Union{Nothing,PresolveAlgorithm}
conqueralg::AbstractConquerAlgorithm
dividealg::AlgoAPI.AbstractDivideAlgorithm
explorestrategy::TreeSearch.AbstractExploreStrategy
maxnumnodes::Int64
opennodeslimit::Int64
timelimit::Int64
opt_atol::Float64
opt_rtol::Float64
branchingtreefile::String
jsonfile::String
print_node_info::Bool
TreeSearchAlgorithm(;
presolvealg = nothing,
conqueralg = ColCutGenConquer(),
dividealg = ClassicBranching(),
explorestrategy = TreeSearch.DepthFirstStrategy(),
maxnumnodes = 100000,
opennodeslimit = 100,
timelimit = -1, # means no time limit
opt_atol = AlgoAPI.default_opt_atol(),
opt_rtol = AlgoAPI.default_opt_rtol(),
branchingtreefile = "",
jsonfile = "",
print_node_info = true
) = new(presolvealg, conqueralg, dividealg, explorestrategy, maxnumnodes, opennodeslimit, timelimit, opt_atol, opt_rtol, branchingtreefile, jsonfile, print_node_info)
end
# TreeSearchAlgorithm is a manager algorithm (manages storing and restoring storage units)
ismanager(algo::TreeSearchAlgorithm) = true
# TreeSearchAlgorithm does not use any record itself,
# therefore get_units_usage() is not defined for it
function get_child_algorithms(algo::TreeSearchAlgorithm, reform::Reformulation)
    # Explicit key/value types: keeps the dictionary wide enough to accept the optional
    # "presolve" entry below and matches the contract documented on the generic
    # `get_child_algorithms` method.
    child_algos = Dict{String, Tuple{AlgoAPI.AbstractAlgorithm, AbstractModel}}(
        "conquer" => (algo.conqueralg, reform),
        "divide" => (algo.dividealg, reform)
    )
    if !isnothing(algo.presolvealg)
        child_algos["presolve"] = (algo.presolvealg, reform)
    end
    return child_algos
end
# Runs the branch-and-bound: optional presolve, then the tree search driven by the
# configured explore strategy.
function run!(algo::TreeSearchAlgorithm, env::Env, reform::Reformulation, input::OptimizationState)
    # TreeSearchAlgorithm is the only algorithm that changes the global time limit in the
    # environment. However, a time limit set from JuMP/MOI has priority.
    if env.global_time_limit == -1
        env.global_time_limit = algo.timelimit
    else
        @warn "Global time limit has been set through JuMP/MOI. Ignoring the time limit of TreeSearchAlgorithm."
    end
    if !isnothing(algo.presolvealg)
        presolve_output = run!(algo.presolvealg, env, reform, PresolveInput())
        if !isfeasible(presolve_output)
            # Presolve proved infeasibility: stop before building the search tree.
            setterminationstatus!(input, ColunaBase.INFEASIBLE)
            return input
        end
    end
    space = TreeSearch.new_space(TreeSearch.search_space_type(algo), algo, reform, input)
    return TreeSearch.tree_search(algo.explorestrategy, space, env, input)
end
############################################################################################
# Tree search interface for Coluna algorithms
############################################################################################
"Search space for tree search algorithms in Coluna."
abstract type AbstractColunaSearchSpace <: TreeSearch.AbstractSearchSpace end
# Additional methods to implement to use the tree search algorithms together with Coluna's
# algorithms.
"Returns the previous node explored by the tree search algorithm."
@mustimplement "ColunaSearchSpace" get_previous(s::AbstractColunaSearchSpace) = nothing
"Sets the previous node explored by the tree search algorithm."
@mustimplement "ColunaSearchSpace" set_previous!(s::AbstractColunaSearchSpace, previous) = nothing
"Returns the conquer algorithm."
@mustimplement "ColunaSearchSpace" get_conquer(sp::AbstractColunaSearchSpace) = nothing
"Returns the divide algorithm."
@mustimplement "ColunaSearchSpace" get_divide(sp::AbstractColunaSearchSpace) = nothing
"Returns the reformulation that will be passed to an algorithm."
@mustimplement "ColunaSearchSpace" get_reformulation(s::AbstractColunaSearchSpace) = nothing
"""
Returns the input that will be passed to an algorithm.
The input can be built from information contained in a search space and a node.
"""
@mustimplement "ColunaSearchSpace" get_input(a::AlgoAPI.AbstractAlgorithm, s::AbstractColunaSearchSpace, n::TreeSearch.AbstractNode) = nothing
"""
Method to perform operations before the tree search algorithm evaluates a node (`current`).
This is useful to restore the state of the formulation, for instance.
"""
@mustimplement "ColunaSearchSpace" node_change!(previous::TreeSearch.AbstractNode, current::TreeSearch.AbstractNode, space::AbstractColunaSearchSpace) = nothing
"""
Method to perform operations after the conquer algorithm.
It receives the output of the conquer algorithm.
"""
@mustimplement "ColunaSearchSpace" after_conquer!(::AbstractColunaSearchSpace, current, output) = nothing
"Returns the number of children generated by the divide algorithm."
@mustimplement "ColunaSearchSpace" number_of_children(divide_output) = nothing
"""
Performs operations after the divide algorithm when the current node is finally a leaf.
"""
@mustimplement "ColunaSearchSpace" node_is_leaf(::AbstractColunaSearchSpace, current, output) = nothing
"Creates and returns the children of a node associated to a search space."
@mustimplement "ColunaSearchSpace" new_children(sp::AbstractColunaSearchSpace, candidates, n::TreeSearch.AbstractNode) = nothing
"Runs the divide algorithm if allowed; returns whether the divide should run."
@mustimplement "ColunaSearchSpace" run_divide(sp::AbstractColunaSearchSpace, divide_input) = nothing
"Returns true if the current node should not be explored i.e. if its local dual bound inherited from its parent is worse than a primal bound of the search space."
@mustimplement "ColunaSearchSpace" is_pruned(sp::AbstractColunaSearchSpace, current) = nothing
"Method to perform some operations if the current node is pruned."
@mustimplement "ColunaSearchSpace" node_is_pruned(sp::AbstractColunaSearchSpace, current) = nothing
# Implementation of the `children` method for the `AbstractColunaSearchSpace` algorithm.
# Evaluates `current` (conquer), possibly divides it, and returns its child nodes;
# returns an empty vector when the node is pruned or turns out to be a leaf.
function TreeSearch.children(space::AbstractColunaSearchSpace, current::TreeSearch.AbstractNode, env)
    # restore state of the formulation for the current node.
    previous = get_previous(space)
    if !isnothing(previous)
        node_change!(previous, current, space)
    end
    set_previous!(space, current)
    # We should avoid the whole exploration of a node if its local dual bound inherited from its parent is worse than a primal bound found elsewhere on the tree.
    if is_pruned(space, current)
        node_is_pruned(space, current)
        return []
    end
    # Else we run the conquer algorithm.
    # This algorithm has the responsibility to check whether the node is pruned.
    reform = get_reformulation(space)
    # Reuse the conquer output already attached to the node, if any (avoids re-evaluating).
    conquer_output = TreeSearch.get_conquer_output(current)
    if conquer_output === nothing
        conquer_alg = get_conquer(space)
        conquer_input = get_input(conquer_alg, space, current)
        conquer_output = run!(conquer_alg, env, reform, conquer_input)
        after_conquer!(space, current, conquer_output) # callback to do some operations after the conquer.
    end
    # Build the divide input from the conquer output
    divide_alg = get_divide(space)
    divide_input = get_input(divide_alg, space, current, conquer_output)
    branches = nothing
    # if `run_divide` returns false, the divide is not run and the node is pruned.
    if run_divide(space, divide_input)
        branches = run!(divide_alg, env, reform, divide_input)
    end
    if isnothing(branches) || number_of_children(branches) == 0
        node_is_leaf(space, current, conquer_output) # callback to do some operations when the node is a leaf.
        return [] # node is pruned, no children is generated
    end
    return new_children(space, branches, current)
end
| Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 3519 | """
CutCallbacks(
call_robust_facultative = true,
call_robust_essential = true,
tol::Float64 = 1e-6
)
Runs the cut user callbacks attached to a formulation.
**Parameters:**
- `call_robust_facultative`: if true, call all the robust facultative cut user callbacks (i.e. user cut callbacks)
- `call_robust_essential`: if true, call all the robust essential cut user callbacks (i.e. lazy constraint callbacks)
- `tol`: tolerance used to determine if a cut is violated
See the JuMP documentation for more information about user callbacks and the tutorials in the
Coluna documentation for examples of user callbacks.
"""
@with_kw struct CutCallbacks <: AlgoAPI.AbstractAlgorithm
call_robust_facultative = true
call_robust_essential = true
tol = 1e-6
end
# Input of `CutCallbacks`: the primal solution to separate cuts against.
struct CutCallbacksInput
    primalsol::PrimalSolution
end
# Output of `CutCallbacks`: number of cuts added, total and per kind.
struct CutCallbacksOutput
    nb_cuts_added::Int
    nb_essential_cuts_added::Int
    nb_facultative_cuts_added::Int
end
# Context handed to each user separation callback.
struct RobustCutCallbackContext
    form::Formulation
    env::Env
    constrkind::ConstrKind
    proj_sol::PrimalSolution # ordered non zero but O(log^2(n)) lookup time
    proj_sol_dict::Dict{VarId, Float64} # O(1) lookup time
    viol_vals::Vector{Float64}
    orig_sol::PrimalSolution
end
# CutCallbacks does not have child algorithms, therefore get_child_algorithms() is not defined
# The algorithm adds cuts to the master, hence the read-and-write permission on the cuts unit.
function get_units_usage(algo::CutCallbacks, form::Formulation{Duty}
) where {Duty<:MathProg.AbstractFormDuty}
    return [(form, MasterCutsUnit, READ_AND_WRITE)]
end
# Runs the robust cut user callbacks (essential and/or facultative, per `algo` flags)
# on the projection of `input.primalsol` onto the original variables, and reports how
# many cuts each callback added.
function run!(algo::CutCallbacks, env::Env, form::Formulation, input::CutCallbacksInput)
    robust_generators = get_robust_constr_generators(form)
    nb_ess_cuts = 0
    nb_fac_cuts = 0
    if length(robust_generators) > 0 && (algo.call_robust_facultative || algo.call_robust_essential)
        !MathProg.projection_is_possible(form) && error("Cannot do projection on original variables. Open an issue.")
        projsol1 = proj_cols_on_rep(input.primalsol)
        projsol2 = Dict{VarId, Float64}(varid => val for (varid, val) in projsol1)
        viol_vals = Float64[]
        for constrgen in robust_generators
            # Skip callbacks of a kind the algorithm was configured not to call.
            if constrgen.kind == Facultative && !algo.call_robust_facultative
                continue
            end
            if constrgen.kind == Essential && !algo.call_robust_essential
                continue
            end
            # The callback records one violation value per cut it adds.
            cur_viol_vals = Float64[]
            context = RobustCutCallbackContext(
                form, env, constrgen.kind, projsol1, projsol2, cur_viol_vals, input.primalsol
            )
            constrgen.separation_alg(context)
            if constrgen.kind == Facultative
                nb_fac_cuts += length(cur_viol_vals)
            else
                nb_ess_cuts += length(cur_viol_vals)
            end
            # Idiomatic bulk append instead of a manual push! loop.
            append!(viol_vals, cur_viol_vals)
        end
        # Idiomatic count instead of a manual accumulation loop.
        zeroviols = count(v -> v < algo.tol, viol_vals)
        @printf "Cut separation callback adds %i new essential cuts " nb_ess_cuts
        @printf "and %i new facultative cuts.\n" nb_fac_cuts
        if nb_fac_cuts + nb_ess_cuts > 0
            @printf(
                "avg. viol. = %.2f, max. viol. = %.2f, zero viol. = %i.\n",
                mean(viol_vals), maximum(viol_vals), zeroviols
            )
        end
    end
    return CutCallbacksOutput(nb_ess_cuts + nb_fac_cuts, nb_ess_cuts, nb_fac_cuts)
end
| Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 10698 | ################################################################################
# Algorithm
################################################################################
"""
Coluna.Algorithm.SolveIpForm(
optimizer_id = 1
moi_params = MoiOptimize()
user_params = UserOptimize()
custom_params = CustomOptimize()
)
Solve an optimization problem. This algorithm can call different type of optimizers :
- subsolver interfaced with MathOptInterface to optimize a mixed integer program
- pricing callback defined by the user
- custom optimizer to solve a custom model
You can specify an optimizer using the `default_optimizer` attribute of Coluna or
with the method `specify!` from BlockDecomposition.
If you want to define several optimizers for a given subproblem, you must use `specify!`:
specify!(subproblem, optimizers = [optimizer1, optimizer2, optimizer3])
Value of `optimizer_id` is the position of the optimizer you want to use.
For example, if `optimizer_id` is equal to 2, the algorithm will use `optimizer2`.
By default, the algorihm uses the first optimizer or the default optimizer if no
optimizer has been specified through `specify!`.
Depending on the type of the optimizer chosen, the algorithm will use one the
three configurations :
- `moi_params` for subsolver interfaced with MathOptInterface
- `user_params` for pricing callbacks
- `custom_params` for custom solvers
Custom solver is undocumented because alpha.
"""
struct SolveIpForm <: AbstractOptimizationAlgorithm
optimizer_id::Int
moi_params::MoiOptimize
user_params::UserOptimize
custom_params::CustomOptimize
SolveIpForm(;
optimizer_id = 1,
moi_params = MoiOptimize(),
user_params = UserOptimize(),
custom_params = CustomOptimize()
) = new(optimizer_id, moi_params, user_params, custom_params)
end
# SolveIpForm does not have child algorithms, therefore get_child_algorithms() is not defined
# Dispatch on the type of the optimizer to return the matching parameter set
# (`nothing` when no optimizer is attached).
_optimizer_params(::Formulation, algo::SolveIpForm, ::MoiOptimize) = algo.moi_params
_optimizer_params(::Formulation, algo::SolveIpForm, ::UserOptimizer) = algo.user_params
_optimizer_params(form::Formulation, algo::SolveIpForm, ::CustomOptimizer) = getinner(getoptimizer(form, algo.optimizer_id))
_optimizer_params(::Formulation, ::SolveIpForm, ::NoOptimizer) = nothing
# Dispatches to the run! method matching the optimizer attached to the formulation.
# NOTE(review): the trailing positional `optimizer_id = 1` argument is never read —
# the optimizer is always taken from `algo.optimizer_id`. Presumably kept for backward
# compatibility; confirm whether any caller still passes it.
function run!(algo::SolveIpForm, env::Env, form::Formulation, input::OptimizationState, optimizer_id = 1)
    opt = getoptimizer(form, algo.optimizer_id)
    params = _optimizer_params(form, algo, opt)
    if params !== nothing
        return run!(params, env, form, input; optimizer_id = algo.optimizer_id)
    end
    return error("Cannot optimize formulation with optimizer of type $(typeof(opt)).")
end
# On a reformulation, SolveIpForm solves the master problem.
run!(algo::SolveIpForm, env::Env, reform::Reformulation, input::OptimizationState) =
    run!(algo, env, getmaster(reform), input)
################################################################################
# Get units usage (depends on the type of the optimizer)
################################################################################
# Delegates to the units usage of the parameter set matching the attached optimizer.
function get_units_usage(algo::SolveIpForm, form::Formulation)
    optimizer = getoptimizer(form, algo.optimizer_id)
    optimizer_params = _optimizer_params(form, algo, optimizer)
    if isnothing(optimizer_params)
        return error("Cannot get units usage of optimizer of type $(typeof(optimizer)).")
    end
    return get_units_usage(optimizer_params, form)
end
# get_units_usage of MoiOptimize
function get_units_usage(
    ::MoiOptimize, form::Formulation{Duty}
) where {Duty<:MathProg.AbstractFormDuty}
    # Storage units are used in read-only mode: every modification performed while
    # solving (deactivating artificial vars, enforcing integrality) is reverted before
    # the end of the algorithm, so the state of the formulation remains the same.
    units_usage = Tuple{AbstractModel, UnitType, UnitPermission}[
        (form, StaticVarConstrUnit, READ_ONLY)
    ]
    if Duty <: MathProg.AbstractMasterDuty
        for unit in (MasterColumnsUnit, MasterBranchConstrsUnit, MasterCutsUnit, PartialSolutionUnit)
            push!(units_usage, (form, unit, READ_ONLY))
        end
    end
    return units_usage
end
# On a reformulation, SolveIpForm only touches the master problem's units.
get_units_usage(algo::SolveIpForm, reform::Reformulation) =
    get_units_usage(algo, getmaster(reform))
# get_units_usage of UserOptimize: the pricing callback uses no storage units.
function get_units_usage(::UserOptimize, spform::Formulation{DwSp})
    units_usage = Tuple{AbstractModel, UnitType, UnitPermission}[]
    return units_usage
end
# no get_units_usage of CustomOptimize because it directly calls the
# get_units_usage of the custom optimizer
################################################################################
# run! methods (depends on the type of the optimizer)
################################################################################
# A MOI subsolver supports IP iff it accepts integrality constraints on variables.
function check_if_optimizer_supports_ip(optimizer::MoiOptimizer)
    return MOI.supports_constraint(optimizer.inner, MOI.VariableIndex, MOI.Integer)
end
# Pricing callbacks and missing optimizers cannot solve an IP through this path.
check_if_optimizer_supports_ip(optimizer::UserOptimizer) = false
check_if_optimizer_supports_ip(optimizer::NoOptimizer) = false
# run! of MoiOptimize
# Solves the (mixed-)integer formulation with the MOI subsolver identified by
# `optimizer_id`, stores primal solutions in the returned OptimizationState and,
# if requested and the solve is optimal, the dual bound and an LP dual solution.
function run!(
    algo::MoiOptimize, ::Env, form::Formulation, input::OptimizationState;
    optimizer_id::Int = 1
)
    result = OptimizationState(
        form,
        ip_primal_bound = get_ip_primal_bound(input),
        max_length_ip_primal_sols = algo.max_nb_ip_primal_sols
    )
    optimizer = getoptimizer(form, optimizer_id)
    ip_supported = check_if_optimizer_supports_ip(optimizer)
    if !ip_supported
        # Fix: the arguments were previously comma-separated, which made the macro
        # log a tuple instead of a readable message.
        @warn "Optimizer of formulation with id = $(getuid(form)) does not support integer variables. Skip SolveIpForm algorithm."
        return result
    end
    primal_sols = optimize_ip_form!(algo, optimizer, form, result)
    if length(primal_sols) > 0
        for primal_sol in primal_sols
            add_ip_primal_sol!(result, primal_sol)
        end
        if algo.log_level == 0
            @printf "Found primal solution of %.4f \n" getvalue(get_ip_primal_bound(result))
        end
        @logmsg LogLevel(-3) get_best_ip_primal_sol(result)
    else
        if algo.log_level == 0
            println(
                "No primal solution found. Termination status is ",
                getterminationstatus(result), ". "
            )
        end
    end
    if algo.get_dual_bound && getterminationstatus(result) == OPTIMAL
        # At optimality the primal bound is also a valid dual bound.
        dual_bound = getvalue(get_ip_primal_bound(result))
        set_ip_dual_bound!(result, DualBound(form, dual_bound))
        set_lp_dual_bound!(result, DualBound(form, dual_bound))
    end
    if algo.get_dual_solution && getterminationstatus(result) == OPTIMAL
        dual_sols = get_dual_solutions(form, optimizer)
        if length(dual_sols) > 0
            # Pick the best dual solution w.r.t. the objective sense.
            coeff = getobjsense(form) == MinSense ? 1.0 : -1.0
            lp_dual_sol_pos = argmax(coeff * getvalue.(dual_sols))
            lp_dual_sol = dual_sols[lp_dual_sol_pos]
            set_lp_dual_sol!(result, lp_dual_sol)
        end
    end
    return result
end
# Optimizes the formulation as an IP with the MOI subsolver: temporarily deactivates
# artificial variables and enforces integrality (as configured), solves, collects the
# primal solutions, then reverts both temporary modifications in reverse order.
function optimize_ip_form!(
    algo::MoiOptimize, optimizer::MoiOptimizer, form::Formulation, result::OptimizationState
)
    # No way to enforce upper bound or lower bound through MOI.
    # We must add a constraint c'x <= UB in formulation.
    if algo.deactivate_artificial_vars
        deactivate!(form, vcid -> isanArtificialDuty(getduty(vcid)))
    end
    if algo.enforce_integrality
        enforce_integrality!(form)
    end
    optimize_with_moi!(optimizer, form, algo, result)
    primal_sols = get_primal_solutions(form, optimizer)
    # Revert the temporary modifications so the formulation state is unchanged.
    if algo.enforce_integrality
        relax_integrality!(form)
    end
    if algo.deactivate_artificial_vars
        activate!(form, vcid -> isanArtificialDuty(getduty(vcid)))
    end
    return primal_sols
end
# errors for the pricing callback
"""
    IncorrectPricingDualBound

Error thrown when transmitting a dual bound larger than the primal bound of the
best solution to the pricing subproblem found in a run of the pricing callback.
"""
struct IncorrectPricingDualBound
    pb::ColunaBase.Bound          # primal bound of the best pricing solution
    db::Union{Nothing,ColunaBase.Bound}  # dual bound transmitted by the callback
end
"""
    MissingPricingDualBound

Error thrown when the pricing callback does not transmit any dual bound.
Make sure you call `MOI.submit(model, BD.PricingDualBound(cbdata), db)` in your pricing
callback.
"""
struct MissingPricingDualBound end
"""
    MultiplePricingDualBounds

Error thrown when the pricing callback transmits multiple dual bounds.
Make sure you call `MOI.submit(model, BD.PricingDualBound(cbdata), db)` only once in your
pricing callback.
"""
struct MultiplePricingDualBounds
    nb_dual_bounds::Int
end
# run! of UserOptimize
# Runs the user pricing callback on the subproblem, validates that exactly one dual
# bound was transmitted, and derives the termination status from the primal/dual bounds.
function run!(
    algo::UserOptimize, ::Env, spform::Formulation{DwSp}, input::OptimizationState;
    optimizer_id::Int = 1
)
    result = OptimizationState(
        spform,
        ip_primal_bound = get_ip_primal_bound(input),
        max_length_ip_primal_sols = algo.max_nb_ip_primal_sols
    )
    optimizer = getoptimizer(spform, optimizer_id)
    cbdata = MathProg.PricingCallbackData(spform)
    optimizer.user_oracle(cbdata)
    # The callback must transmit exactly one dual bound.
    if cbdata.nb_times_dual_bound_set == 0
        throw(MissingPricingDualBound())
    elseif cbdata.nb_times_dual_bound_set > 1
        throw(MultiplePricingDualBounds(cbdata.nb_times_dual_bound_set))
    end
    # If the user does not submit any primal solution, we consider the primal as infeasible.
    primal_infeasible = length(cbdata.primal_solutions) == 0
    # If the dual bound from the pricing callback data is nothing, we consider the dual as
    # infeasible.
    dual_infeasible = isnothing(cbdata.dual_bound)
    set_ip_dual_bound!(result, DualBound(spform, cbdata.dual_bound))
    db = get_ip_dual_bound(result)
    for primal_sol in cbdata.primal_solutions
        add_ip_primal_sol!(result, primal_sol)
    end
    pb = get_ip_primal_bound(result)
    # Derive the termination status from the bounds (1e-4 is the optimality gap tolerance).
    if primal_infeasible && isunbounded(db)
        setterminationstatus!(result, INFEASIBLE)
        set_ip_primal_bound!(result, nothing)
    elseif isunbounded(pb) && dual_infeasible
        setterminationstatus!(result, UNBOUNDED)
        set_ip_dual_bound!(result, nothing)
    elseif abs(gap(pb, db)) <= 1e-4
        setterminationstatus!(result, OPTIMAL)
    elseif gap(pb, db) < -1e-4
        # Dual bound strictly better than the primal bound: the callback lied.
        throw(IncorrectPricingDualBound(pb, db))
    else
        setterminationstatus!(result, OTHER_LIMIT)
    end
    return result
end
# No run! method for CustomOptimize because it directly calls the run! method
# of the custom optimizer
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 4485 | """
Coluna.Algorithm.SolveLpForm(
get_ip_primal_sol = false,
get_dual_sol = false,
relax_integrality = false,
get_dual_bound = false,
silent = true
)
Solve a linear program stored in a formulation using its first optimizer.
This algorithm works only if the optimizer is interfaced with MathOptInterface.
You can define the optimizer using the `default_optimizer` attribute of Coluna or
with the method `specify!` from BlockDecomposition
Parameters:
- `get_ip_primal_sol`: update the primal solution of the formulation if equals `true`
- `get_dual_sol`: retrieve the dual solution and store it in the output if equals `true`
- `relax_integrality`: relax integer variables of the formulation before optimization if equals `true`
- `get_dual_bound`: store the dual objective value in the output if equals `true`
- `silent`: set `MOI.Silent()` to its value
Undocumented parameters are alpha.
"""
struct SolveLpForm <: AbstractOptimizationAlgorithm
    get_ip_primal_sol::Bool    # also store an IP primal sol when the LP sol is integral
    get_dual_sol::Bool         # retrieve the LP dual solution
    relax_integrality::Bool    # relax integer variables before solving
    get_dual_bound::Bool       # store the dual objective value in the output
    silent::Bool               # forwarded to MOI.Silent()

    # Keyword constructor with conservative defaults (only solve the LP).
    SolveLpForm(;
        get_ip_primal_sol = false,
        get_dual_sol = false,
        relax_integrality = false,
        get_dual_bound = false,
        silent = true
    ) = new(get_ip_primal_sol, get_dual_sol, relax_integrality, get_dual_bound, silent)
end
# SolveLpForm does not have child algorithms, therefore get_child_algorithms() is not defined.
# Storage units accessed by `SolveLpForm`, all in read-only mode: relaxing
# integrality is reverted before the end of the algorithm, so the state of the
# formulation remains the same.
function get_units_usage(
    algo::SolveLpForm, form::Formulation{Duty}
) where {Duty<:MathProg.AbstractFormDuty}
    units = Tuple{AbstractModel, UnitType, UnitPermission}[(form, StaticVarConstrUnit, READ_ONLY)]
    if Duty <: MathProg.AbstractMasterDuty
        # Master formulations additionally expose columns, partial solutions,
        # branching constraints, and cuts.
        for unit_type in (MasterColumnsUnit, PartialSolutionUnit, MasterBranchConstrsUnit, MasterCutsUnit)
            push!(units, (form, unit_type, READ_ONLY))
        end
    end
    return units
end
# Fallback: reached when the formulation's optimizer is not a `MoiOptimizer`
# (e.g. a user or custom optimizer), which `SolveLpForm` cannot drive.
function optimize_lp_form!(::SolveLpForm, optimizer, ::Formulation, ::OptimizationState) # fallback
    error("Cannot optimize LP formulation with optimizer of type ", typeof(optimizer), ".")
end
# Translates the `SolveLpForm` options into a `MoiOptimize` configuration and
# delegates the solve to `optimize_with_moi!`. Artificial variables stay active
# and integrality is never enforced since a linear relaxation is solved.
function optimize_lp_form!(
    algo::SolveLpForm, optimizer::MoiOptimizer, form::Formulation, result::OptimizationState
)
    moi_params = MoiOptimize(
        time_limit = 3600, # TODO: expose
        deactivate_artificial_vars = false,
        enforce_integrality = false,
        relax_integrality = algo.relax_integrality,
        get_dual_bound = algo.get_dual_bound,
        get_dual_solution = algo.get_dual_sol,
        silent = algo.silent
    )
    optimize_with_moi!(optimizer, form, moi_params, result)
    return
end
# run! of SolveLpForm
# Solves the LP (relaxation) of `form` and returns an `OptimizationState`
# holding the best LP primal/dual solutions and bounds, and possibly an IP
# primal solution (see `get_ip_primal_sol`).
function run!(
    algo::SolveLpForm, ::Env, form::Formulation, input::OptimizationState,
    optimizer_id::Int = 1
)
    result = OptimizationState(form)
    TO.@timeit Coluna._to "SolveLpForm" begin
        if algo.relax_integrality
            relax_integrality!(form)
        end
        optimizer = getoptimizer(form, optimizer_id)
        optimize_lp_form!(algo, optimizer, form, result)
        primal_sols = get_primal_solutions(form, optimizer)
        # `coeff` normalizes the objective sense so argmax/argmin below pick
        # the best solution for both Min and Max problems.
        coeff = getobjsense(form) == MinSense ? 1.0 : -1.0
        if algo.get_dual_sol
            dual_sols = get_dual_solutions(form, optimizer)
            if length(dual_sols) > 0
                # Keep the dual solution with the best normalized value.
                lp_dual_sol_pos = argmax(coeff * getvalue.(dual_sols))
                lp_dual_sol = dual_sols[lp_dual_sol_pos]
                set_lp_dual_sol!(result, lp_dual_sol)
                if algo.get_dual_bound
                    db = DualBound(form, getvalue(lp_dual_sol))
                    set_lp_dual_bound!(result, db)
                end
            end
        end
        if length(primal_sols) > 0
            # Keep the primal solution with the best normalized value.
            lp_primal_sol_pos = argmin(coeff * getvalue.(primal_sols))
            lp_primal_sol = primal_sols[lp_primal_sol_pos]
            add_lp_primal_sol!(result, lp_primal_sol)
            pb = PrimalBound(form, getvalue(lp_primal_sol))
            set_lp_primal_bound!(result, pb)
            # An integral LP solution without artificial variables is also a
            # valid IP primal solution.
            if algo.get_ip_primal_sol && isinteger(lp_primal_sol) &&
                !contains(lp_primal_sol, varid -> isanArtificialDuty(getduty(varid)))
                add_ip_primal_sol!(result, lp_primal_sol)
            end
        end
    end # @timeit
    return result
end
| Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 4198 | ################################################################################
# Parameters for each type of optimizer (subsolver).
################################################################################
"""
MoiOptimize(
time_limit = 600
deactivate_artificial_vars = false
enforce_integrality = false
get_dual_bound = true
)
Configuration for an optimizer that calls a subsolver through MathOptInterface.
Parameters:
- `time_limit`: in seconds
- `deactivate_artificial_vars`: deactivate all artificial variables of the formulation if equals `true`
- `enforce_integrality`: enforce integer variables that are relaxed if equals `true`
- `get_dual_bound`: store the dual objective value in the output if equals `true`
"""
@with_kw struct MoiOptimize
time_limit::Int = 600
deactivate_artificial_vars::Bool = true
enforce_integrality::Bool = true
relax_integrality::Bool = false
get_dual_bound::Bool = true
get_dual_solution::Bool = false # Used in MOI integration tests.
max_nb_ip_primal_sols::Int = 50
log_level::Int = 2
silent::Bool = true
custom_parameters = Dict{String,Any}()
end
# Snapshot the subsolver parameter values that `optimize_with_moi!` is about to
# overwrite (MOI attributes and raw solver-specific attributes), so they can be
# restored after the solve.
function _get_cur_optimizer_params(optimizer::MoiOptimizer, algo::MoiOptimize)
    moi_parameters = Dict{DataType, Any}(
        param_type => MOI.get(optimizer.inner, param_type())
        for param_type in (MOI.TimeLimitSec, MOI.Silent)
    )
    raw_parameters = Dict{String, Any}(
        name => MOI.get(optimizer.inner, MOI.RawOptimizerAttribute(name))
        for name in Iterators.keys(algo.custom_parameters)
    )
    return moi_parameters, raw_parameters
end
# Apply MOI parameters (keyed by attribute type) and raw solver-specific
# parameters (keyed by attribute name) to the subsolver.
function _set_optimizer_params!(optimizer::MoiOptimizer, moi_parameters, raw_parameters)
    inner = optimizer.inner
    foreach(pairs(moi_parameters)) do (param_type, value)
        MOI.set(inner, param_type(), value)
    end
    foreach(pairs(raw_parameters)) do (param_name, value)
        MOI.set(inner, MOI.RawOptimizerAttribute(param_name), value)
    end
    return
end
# Reads the MOI termination status, converts it into a Coluna status, and
# records it in `result`. Errors out on the inconsistent case where the
# subsolver claims optimality but exposes no result.
function _termination_status!(result::OptimizationState, optimizer::MoiOptimizer)
    termination_status = MOI.get(getinner(optimizer), MOI.TerminationStatus())
    coluna_termination_status = convert_status(termination_status)
    if coluna_termination_status == OPTIMAL
        if MOI.get(getinner(optimizer), MOI.ResultCount()) <= 0
            msg = """
            Termination status = $(termination_status) but no results.
            Please, open an issue at https://github.com/atoptima/Coluna.jl/issues
            """
            error(msg)
        end
    end
    setterminationstatus!(result, coluna_termination_status)
    return
end
# Core solve routine for `MoiOptimize`: saves the subsolver's current parameter
# values, applies the algorithm's parameters, synchronizes the formulation with
# the subsolver, optimizes, restores the saved parameters, and records the
# termination status in `result`.
function optimize_with_moi!(
    optimizer::MoiOptimizer, form::Formulation, algo::MoiOptimize, result::OptimizationState
)
    # Get current parameter values that we are going to set.
    init_moi_params, init_raw_params = _get_cur_optimizer_params(optimizer, algo)
    # Set parameters.
    cur_moi_params = Dict(
        MOI.TimeLimitSec => algo.time_limit,
        MOI.Silent => algo.silent
    )
    cur_raw_params = algo.custom_parameters
    _set_optimizer_params!(optimizer, cur_moi_params, cur_raw_params)
    # Synchronize the subsolver by transmitting all buffered formulation changes.
    sync_solver!(optimizer, form)
    nbvars = MOI.get(optimizer.inner, MOI.NumberOfVariables())
    if nbvars <= 0
        @warn "No variable in the formulation."
    end
    # Solve.
    MOI.optimize!(getinner(optimizer))
    # Reset parameters.
    _set_optimizer_params!(optimizer, init_moi_params, init_raw_params)
    # Retrieve termination status from MOI and convert into Coluna termination status.
    _termination_status!(result, optimizer)
    return
end
"""
UserOptimize(
max_nb_ip_primal_sols = 50
)
Configuration for an optimizer that calls a pricing callback to solve the problem.
Parameters:
- `max_nb_ip_primal_sols`: maximum number of solutions returned by the callback kept
"""
@with_kw struct UserOptimize
max_nb_ip_primal_sols::Int = 50
end
"""
CustomOptimize()
Configuration for an optimizer that calls a custom solver to solve a custom model.
"""
struct CustomOptimize end
| Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 24453 | """
BendersContext(reformulation, algo_params) -> BendersContext
Default implementation of the Benders algorithm.
"""
struct BendersContext <: Benders.AbstractBendersContext
    reform::Reformulation
    optim_sense                       # objective sense of the reformulation
    restr_master_solve_alg            # algorithm used to solve the restricted master
    restr_master_optimizer_id::Int    # id of the master optimizer to use
    nb_benders_iteration_limits::Int  # maximum number of Benders iterations
    rhs_helper::RhsCalculationHelper  # per-subproblem rhs `r` and technological matrix `T`
    second_stage_cost_var_ids::Vector{VarId}  # η variables, one per separation subproblem
    separation_solve_alg              # algorithm used to solve the separation subproblems

    function BendersContext(reform, alg)
        return new(
            reform,
            getobjsense(reform),
            alg.restr_master_solve_alg,
            alg.restr_master_optimizer_id,
            alg.max_nb_iterations,
            RhsCalculationHelper(reform),
            _second_stage_cost_var_ids(reform),
            alg.separation_solve_alg
        )
    end
end
# Collect the id of the second-stage cost variable (η) of every Benders
# separation subproblem of the reformulation.
function _second_stage_cost_var_ids(reform)
    return VarId[
        begin
            cost_var_id = sp.duty_data.second_stage_cost_var
            @assert !isnothing(cost_var_id)
            cost_var_id
        end
        for (_, sp) in get_benders_sep_sps(reform)
    ]
end
# Accessors required by the generic Benders interface.
Benders.is_minimization(ctx::BendersContext) = ctx.optim_sense == MinSense
Benders.get_reform(ctx::BendersContext) = ctx.reform
Benders.get_master(ctx::BendersContext) = getmaster(ctx.reform)
Benders.get_benders_subprobs(ctx::BendersContext) = get_benders_sep_sps(ctx.reform)

# (De)activate the artificial variables of a separation subproblem; they are
# only needed by the phase-1 treatment of an infeasible separation problem.
_deactivate_art_vars(sp) = deactivate!(sp, vcid -> isanArtificialDuty(getduty(vcid)))
_activate_art_vars(sp) = activate!(sp, vcid -> isanArtificialDuty(getduty(vcid)))
# Prepare the reformulation for the Benders loop: deactivate the artificial
# variables of every separation subproblem (they are re-activated on demand by
# the infeasible-separation treatment).
function Benders.setup_reformulation!(reform::Reformulation, env)
    foreach(sp_pair -> _deactivate_art_vars(last(sp_pair)), get_benders_sep_sps(reform))
    return
end
"""
Output of the default implementation of the `Benders.optimize_master_problem!` method.
It contains:
- `ip_solver`: `true` if the master problem is solved with a MIP solver and involves integral variables, `false` otherwise.
- `result`: the result of the master problem optimization stored in an `OptimizationState` object.
- `infeasible`: `true` if the master at the current iteration is infeasible; `false` otherwise.
- `unbounded`: `true` if the master at the current iteration is unbounded; `false` otherwise.
- `certificate`: `true` if the master at the current iteration is unbounded and if the current result is a dual infeasibility certificate, `false` otherwise.
"""
struct BendersMasterResult{F}
ip_solver::Bool
result::OptimizationState{F}
infeasible::Bool
unbounded::Bool
certificate::Bool
end
Benders.is_unbounded(master_res::BendersMasterResult) = master_res.unbounded
Benders.is_infeasible(master_res::BendersMasterResult) = master_res.infeasible
Benders.is_certificate(master_res::BendersMasterResult) = master_res.certificate

# Best primal solution of the master: the IP solution when the master was
# solved as a MIP, the LP solution otherwise.
function Benders.get_primal_sol(master_res::BendersMasterResult)
    getter = master_res.ip_solver ? get_best_ip_primal_sol : get_best_lp_primal_sol
    return getter(master_res.result)
end

Benders.get_dual_sol(master_res::BendersMasterResult) = get_best_lp_dual_sol(master_res.result)
Benders.get_obj_val(master_res::BendersMasterResult) = getvalue(Benders.get_primal_sol(master_res))
# Reset the incumbent value of every second-stage cost variable (η) of the
# master formulation to zero.
function _reset_second_stage_cost_var_inc_vals(ctx::BendersContext)
    master = Benders.get_master(ctx)
    foreach(var_id -> setcurincval!(master, var_id, 0.0), ctx.second_stage_cost_var_ids)
    return
end
# Store the values of the master primal solution as incumbent values in the
# master formulation (after resetting the η incumbents). No-op when the master
# result carries no primal solution.
# Improvement over the original: `Benders.get_primal_sol` was called three
# times; its result is now computed once.
function _update_second_stage_cost_var_inc_vals(ctx::BendersContext, master_res::BendersMasterResult)
    primal_sol = Benders.get_primal_sol(master_res)
    isnothing(primal_sol) && return
    _reset_second_stage_cost_var_inc_vals(ctx)
    master = Benders.get_master(ctx)
    for (var_id, val) in primal_sol
        setcurincval!(master, var_id, val)
    end
    return
end
# Solves the restricted master with the configured algorithm and wraps the
# outcome in a `BendersMasterResult`. Also stores the master solution values as
# incumbent values (used later when setting up the separation subproblems).
function Benders.optimize_master_problem!(master, ctx::BendersContext, env)
    rm_input = OptimizationState(master)
    opt_state = run!(ctx.restr_master_solve_alg, env, master, rm_input, ctx.restr_master_optimizer_id)
    ip_solver = typeof(ctx.restr_master_solve_alg) <: SolveIpForm
    unbounded = getterminationstatus(opt_state) == UNBOUNDED
    infeasible = getterminationstatus(opt_state) == INFEASIBLE
    master_res = BendersMasterResult(ip_solver, opt_state, infeasible, unbounded, false)
    _update_second_stage_cost_var_inc_vals(ctx, master_res)
    return master_res
end
# Handles an unbounded restricted master: tries to obtain a dual infeasibility
# certificate (extreme ray) from which a cut can be derived. With a MIP master,
# integrality is relaxed and the LP re-solved first because certificates are
# only available on the LP. When no certificate is returned, falls back to
# re-solving the master with zero second-stage costs (see caveat below).
function Benders.treat_unbounded_master_problem_case!(master, ctx::BendersContext, env)
    mast_result = nothing
    ip_solver = typeof(ctx.restr_master_solve_alg) <: SolveIpForm
    # In the unbounded case, to get a dual infeasibility certificate, we need to relax the
    # integrality and solve the master again. (at least with GLPK & the current implementation of SolveIpForm)
    if ip_solver
        relax_integrality!(master)
        rm_input = OptimizationState(master)
        opt_state = run!(SolveLpForm(get_dual_sol = true), env, master, rm_input, ctx.restr_master_optimizer_id)
        enforce_integrality!(master)
    end
    # We can derive a cut from the extreme ray
    certificates = MathProg.get_dual_infeasibility_certificate(master, getoptimizer(master, ctx.restr_master_optimizer_id))
    if length(certificates) > 0
        opt_state = OptimizationState(master; )
        set_ip_primal_sol!(opt_state, first(certificates))
        set_lp_primal_sol!(opt_state, first(certificates))
        mast_result = BendersMasterResult(ip_solver, opt_state, false, false, true)
        _update_second_stage_cost_var_inc_vals(ctx, mast_result)
    else
        # If there is no dual infeasibility certificate, we set the cost of the second stage
        # cost variable to zero and solve the master.
        # TODO: This trick can backfire on us if the optimizer finds the master unbounded
        # and does not return any dual infeasibility certificate for several consecutive iterations.
        # It this case, we can end up with the same first level solution over and over again
        # and probably be trapped in an infinite loop.
        # We can escape the infinite loop by implementing a cut duplication checker but the
        # algorithm won't exit gracefully.
        set_second_stage_var_costs_to_zero!(ctx)
        mast_result = Benders.optimize_master_problem!(master, ctx, env)
        reset_second_stage_var_costs!(ctx)
    end
    return mast_result
end
# Temporarily set the current cost of every second-stage cost variable (η) of
# the master to zero; undone by `reset_second_stage_var_costs!`.
function set_second_stage_var_costs_to_zero!(ctx::BendersContext)
    master = Coluna.MathProg.getmaster(ctx.reform)
    for varid in keys(getvars(master))
        if getduty(varid) <= MasterBendSecondStageCostVar
            setcurcost!(master, varid, 0.0)
        end
    end
    return
end
# Restore the perennial cost of every second-stage cost variable (η) of the
# master; reverses `set_second_stage_var_costs_to_zero!`.
function reset_second_stage_var_costs!(ctx::BendersContext)
    master = Coluna.MathProg.getmaster(ctx.reform)
    for varid in keys(getvars(master))
        if getduty(varid) <= MasterBendSecondStageCostVar
            setcurcost!(master, varid, getperencost(master, varid))
        end
    end
    return
end
# Update separation subproblem `sp` for the first-stage solution
# `mast_primal_sol`: technological constraints get rhs `r - T x*`; every other
# constraint rhs and all variable bounds are reset to their perennial values.
function Benders.update_sp_rhs!(ctx::BendersContext, sp, mast_primal_sol)
    sp_uid = getuid(sp)
    shifted_rhs = ctx.rhs_helper.rhs[sp_uid] - ctx.rhs_helper.T[sp_uid] * mast_primal_sol
    for (constr_id, _) in getconstrs(sp)
        if getduty(constr_id) <= BendSpTechnologicalConstr
            setcurrhs!(sp, constr_id, shifted_rhs[constr_id])
        else
            setcurrhs!(sp, constr_id, getperenrhs(sp, constr_id))
        end
    end
    for (var_id, _) in getvars(sp)
        setcurlb!(sp, var_id, getperenlb(sp, var_id))
        setcurub!(sp, var_id, getperenub(sp, var_id))
    end
    return
end
# Set up separation subproblem `sp` for the unbounded-master case: the
# technological constraints receive rhs `-T x*` (where `x*` is the dual
# infeasibility certificate / extreme ray), the other constraints get rhs 0,
# and the non-artificial variables are relaxed to [0, Inf).
function Benders.setup_separation_for_unbounded_master_case!(ctx::BendersContext, sp, mast_primal_sol)
    spid = getuid(sp)
    T = ctx.rhs_helper.T[spid]
    new_rhs = T * mast_primal_sol
    for (constr_id, _) in getconstrs(sp)
        if getduty(constr_id) <= BendSpTechnologicalConstr
            setcurrhs!(sp, constr_id, - new_rhs[constr_id])
        else
            setcurrhs!(sp, constr_id, 0.0)
        end
    end
    for (var_id, _) in getvars(sp)
        if !(getduty(var_id) <= BendSpSecondStageArtVar)
            # NOTE(review): the original code branched on `getobjsense(sp)` here
            # but both branches set exactly the same bounds, so the dead branch
            # was collapsed (behavior unchanged); confirm whether a different
            # bound setup was intended for maximization.
            setcurlb!(sp, var_id, 0.0)
            setcurub!(sp, var_id, Inf)
        end
    end
    return
end
"""
Solution to the separation problem together with its corresponding benders cut.
It contains:
- `min_sense`: `true` if it's a minimization problem; `false` otherwise.
- `lhs`: the left-hand side of the cut.
- `rhs`: the right-hand side of the cut.
- `dual_sol`: an optimal dual solution to the separation problem.
"""
struct GeneratedCut{F}
min_sense::Bool
lhs::Dict{VarId, Float64}
rhs::Float64
dual_sol::DualSolution{F}
end
"""
Stores a collection of cuts.
It contains `cuts` a vector of `GeneratedCut` objects.
"""
struct CutsSet
cuts::Vector{GeneratedCut}
CutsSet() = new(GeneratedCut[])
end
Base.iterate(set::CutsSet) = iterate(set.cuts)
Base.iterate(set::CutsSet, state) = iterate(set.cuts, state)
Benders.set_of_cuts(::BendersContext) = CutsSet()
"""
Primal solutions to the separation problems optimized at the current iteration.
This is used to build a primal solution.
It contains `sols` a vector of primal solutions.
"""
struct SepSolSet{F}
sols::Vector{MathProg.PrimalSolution{F}}
end
SepSolSet{F}() where {F} = SepSolSet{F}(MathProg.PrimalSolution{F}[])
Benders.set_of_sep_sols(::BendersContext) = SepSolSet{MathProg.Formulation{MathProg.BendersSp}}()
"""
Output of the default implementation of the `Benders.optimize_separation_problem!` and
`Benders.treat_infeasible_separation_problem_case!` methods.
It contains:
- `second_stage_estimation_in_master`: the value of the second stage cost variable in the solution to the master problem.
- `second_stage_cost`: the value of the second stage cost variable in the solution to the separation problem.
- `lp_primal_sol`: the primal solution to the separation problem.
- `infeasible`: `true` if the current separation problem is infeasible; `false` otherwise.
- `unbounded`: `true` if the current separation problem is unbounded; `false` otherwise.
- `cut`: the cut generated by the separation problem.
- `infeasible_treatment`: `true` if this object is an output of the `Benders.treat_infeasible_separation_problem_case!` method; `false` otherwise.
- `unbounded_master`: `true` if the separation subproblem has the form of Lemma 2 to separate a cut to truncate an unbounded ray of the restricted master problem; `false` otherwise.
"""
struct BendersSeparationResult{F}
second_stage_estimation_in_master::Float64
second_stage_cost::Union{Nothing,Float64}
lp_primal_sol::Union{Nothing,MathProg.PrimalSolution{F}}
infeasible::Bool
unbounded::Bool
cut::Union{Nothing,GeneratedCut{F}}
infeasible_treatment::Bool
unbounded_master::Bool
end
Benders.get_obj_val(res::BendersSeparationResult) = res.second_stage_cost
Benders.get_primal_sol(res::BendersSeparationResult) = res.lp_primal_sol
Benders.is_infeasible(res::BendersSeparationResult) = res.infeasible
Benders.is_unbounded(res::BendersSeparationResult) = res.unbounded
Benders.get_dual_sol(res::BendersSeparationResult) = res.cut.dual_sol
## original MIP:
## min cx + dy s.t.
## Ax >= b
## Tx + Qy >= r
## x, y >= 0, x ∈ Z^n
## master:
## min cx + η
## Ax >= B
## < benders cuts >
## SP: ## depends on master attributes (e.g. unbounded), x* fixed
## min dy
## Tx* + Qy >= r
## y >= 0
## π: dual sol
## η: contribution to the objective of the second-level variables
## feasibility cut: πTx >= πr
## optimality cut: η + πTx >= πr
## Depending on the nature of the cut (feasibility or optimality cut), the left-hand side of the cut is equal to either 0.η + πT.x or to 1.η + πT.x. In both cases we have to compute the coefficients behind x variables using the matrix T. The coefficients are stored in a dictionary cut_lhs that matches each var id with its coefficient in the cut.
## second_stage_cost_var: id of the variable η representing the cost of the second stage variables
## T: the matrix which stores the coefficients of x variables in the current subproblem
## dual sol: the dual solution π of the current subproblem
## feasibility_cut: boolean set to true if the current cut is a feasibility cut, false otherwise
# Build the left-hand side of a Benders cut as a var-id => coefficient map:
# the x coefficients are given by πT, and the η coefficient is 0 for a
# feasibility cut and 1 for an optimality cut.
function _compute_cut_lhs(ctx, sp, dual_sol, feasibility_cut)
    cut_lhs = Dict{VarId, Float64}()
    x_coeffs = transpose(ctx.rhs_helper.T[getuid(sp)]) * dual_sol ## πT
    for (varid, coeff) in zip(findnz(x_coeffs)...)
        cut_lhs[varid] = coeff
    end
    cut_lhs[sp.duty_data.second_stage_cost_var] = feasibility_cut ? 0.0 : 1.0
    return cut_lhs
end
## For both feasibility and optimality cuts, the right-hand side is πr with π the dual
## solution of the current sp and r the right-hand side of the sp linear constraints.
## Bounding constraints are handled separately from the other linear constraints in the
## implementation, so their contribution is added to πr here.
function _compute_cut_rhs_contrib(ctx, sp, dual_sol)
    # Contribution of the bounding constraints: `dual_val` is the dual value of
    # the bounding constraint and `active_bound` tells whether the active bound
    # is the LOWER or the UPPER one.
    bounds_contrib = 0.0
    for (varid, (dual_val, active_bound)) in get_var_redcosts(dual_sol)
        if active_bound == MathProg.LOWER
            bounds_contrib += dual_val * getperenlb(sp, varid)
        elseif active_bound == MathProg.UPPER
            bounds_contrib += dual_val * getperenub(sp, varid)
        end
    end
    ## πr + bounding constraints contribution
    return transpose(dual_sol) * ctx.rhs_helper.rhs[getuid(sp)] + bounds_contrib
end
# Solves separation subproblem `sp` (its rhs must have been prepared by
# `update_sp_rhs!` or `setup_separation_for_unbounded_master_case!`) and
# returns a `BendersSeparationResult` carrying either an optimality cut built
# from the dual solution, or the unbounded/infeasible flag.
function Benders.optimize_separation_problem!(ctx::BendersContext, sp::Formulation{BendersSp}, env, unbounded_master)
    spid = getuid(sp)
    second_stage_cost_var = sp.duty_data.second_stage_cost_var
    @assert !isnothing(second_stage_cost_var)
    # Master's current estimation η of this subproblem's cost.
    estimated_cost = getcurincval(Benders.get_master(ctx), second_stage_cost_var)
    input = OptimizationState(sp)
    opt_state = run!(ctx.separation_solve_alg, env, sp, input)
    if getterminationstatus(opt_state) == UNBOUNDED
        return BendersSeparationResult{Formulation{BendersSp}}(estimated_cost, nothing, get_best_lp_primal_sol(opt_state), false, true, nothing, false, unbounded_master)
    end
    if getterminationstatus(opt_state) == INFEASIBLE ## we then enter treat_infeasible_separation_problem_case! (phase 1)
        return BendersSeparationResult{Formulation{BendersSp}}(estimated_cost, nothing, get_best_lp_primal_sol(opt_state), true, false, nothing, false, unbounded_master)
    end
    ## create and add cuts to the result
    dual_sol = get_best_lp_dual_sol(opt_state)
    cost = getvalue(dual_sol)
    min_sense = Benders.is_minimization(ctx)
    cut_lhs = _compute_cut_lhs(ctx, sp, dual_sol, false)
    cut_rhs = _compute_cut_rhs_contrib(ctx, sp, dual_sol)
    cut = GeneratedCut(min_sense, cut_lhs, cut_rhs, dual_sol)
    return BendersSeparationResult(estimated_cost, cost, get_best_lp_primal_sol(opt_state), false, false, cut, false, unbounded_master)
end
# In the unbounded-master case, returns `true` when the total second-stage cost
# found by separation does not exceed (beyond a 1e-5 tolerance) the master's
# estimation η summed over ALL subproblems — i.e. no cut can truncate the ray.
# Improvements over the original: Float64 accumulator (the original started
# from the `Int` literal 0, making the loop type-unstable once incumbent values
# are floats) and `get_master` hoisted out of the loop.
function Benders.master_is_unbounded(ctx::BendersContext, second_stage_cost, unbounded_master_case)
    if !unbounded_master_case
        return false
    end
    estimated_cost = 0.0
    master = Benders.get_master(ctx)
    for (_, sp) in Benders.get_benders_subprobs(ctx)
        second_stage_cost_var = sp.duty_data.second_stage_cost_var
        estimated_cost += getcurincval(master, second_stage_cost_var) ## compute cost η considering ALL subproblems
    end
    min_sense = Benders.is_minimization(ctx)
    sc = min_sense ? 1.0 : - 1.0
    return sc * second_stage_cost < sc * estimated_cost + 1e-5
end
## it is a phase 1: add artificial variables in order to find a feasible solution
# Phase-1 treatment of an infeasible separation subproblem: zero the original
# costs, activate the artificial variables, re-solve, and derive a feasibility
# cut from the dual solution. Costs and artificial-variable deactivation are
# restored before returning.
# Improvement over the original: the opaque `error("A")` / `error("B")`
# messages are replaced with descriptive ones (same control flow).
function Benders.treat_infeasible_separation_problem_case!(ctx::BendersContext, sp::Formulation{BendersSp}, env, unbounded_master_case)
    second_stage_cost_var = sp.duty_data.second_stage_cost_var
    @assert !isnothing(second_stage_cost_var)
    estimated_cost = getcurincval(Benders.get_master(ctx), second_stage_cost_var)
    for (varid, _) in getvars(sp)
        if !isanArtificialDuty(getduty(varid))
            setcurcost!(sp, varid, 0.0)
        end
    end
    _activate_art_vars(sp)
    input = OptimizationState(sp)
    opt_state = run!(ctx.separation_solve_alg, env, sp, input)
    # Restore original costs and deactivate the artificial variables.
    for (varid, _) in getvars(sp)
        if !isanArtificialDuty(getduty(varid))
            setcurcost!(sp, varid, getperencost(sp, varid))
        end
    end
    _deactivate_art_vars(sp)
    if getterminationstatus(opt_state) == INFEASIBLE
        # With artificial variables active the problem is always feasible.
        error("Benders separation problem remains infeasible with active artificial variables; this should not happen. Please open an issue at https://github.com/atoptima/Coluna.jl/issues")
    end
    dual_sol = get_best_lp_dual_sol(opt_state)
    cost = getvalue(dual_sol)
    min_sense = Benders.is_minimization(ctx)
    sc = min_sense ? 1.0 : - 1.0
    if sc * cost <= 0
        # Phase 1 is only entered when the separation problem is infeasible, so
        # the artificial-variable cost must be strictly positive.
        error("Nonpositive phase-1 cost for an infeasible Benders separation problem; this should not happen. Please open an issue at https://github.com/atoptima/Coluna.jl/issues")
    end
    cut_lhs = _compute_cut_lhs(ctx, sp, dual_sol, true)
    cut_rhs = _compute_cut_rhs_contrib(ctx, sp, dual_sol)
    cut = GeneratedCut(min_sense, cut_lhs, cut_rhs, dual_sol)
    return BendersSeparationResult(estimated_cost, cost, get_best_lp_primal_sol(opt_state), false, false, cut, true, unbounded_master_case)
end
# Decide whether the cut carried by `sep_result` enters the set of cuts to add
# to the master: keep it when the true second-stage cost is strictly worse than
# the master's estimation η (beyond a 1e-5 tolerance) or when it is a
# feasibility cut from the infeasibility treatment. Returns `true` iff pushed.
function Benders.push_in_set!(ctx::BendersContext, set::CutsSet, sep_result::BendersSeparationResult)
    if isnothing(sep_result.cut)
        return false
    end
    sc = Benders.is_minimization(ctx) ? 1.0 : -1.0
    eq = abs(sep_result.second_stage_cost - sep_result.second_stage_estimation_in_master) < 1e-5
    gt = sc * sep_result.second_stage_cost + 1e-5 > sc * sep_result.second_stage_estimation_in_master
    # if cost of separation result > second cost variable in master result
    if !eq && gt || sep_result.infeasible_treatment
        push!(set.cuts, sep_result.cut)
        return true
    end
    return false
end

# Collect the separation primal solution (used later to assemble the final
# primal solution). NOTE(review): unlike the `CutsSet` method above, this
# returns the result of `push!` (the vector) rather than a `Bool` — confirm
# callers ignore the return value.
function Benders.push_in_set!(ctx::BendersContext, set::SepSolSet, sep_result::BendersSeparationResult)
    push!(set.sols, Benders.get_primal_sol(sep_result))
end
# Warning thrown by `Benders.insert_cuts!` when a generated cut is found in the
# dual solution pool but is already present and active in the master.
struct CutAlreadyInsertedBendersWarning
    cut_in_master::Bool                 # the cut constraint exists in the master
    cut_is_active::Bool                 # the cut constraint is currently active
    cut_id::ConstrId                    # id of the offending cut
    master::Formulation{BendersMaster}
    subproblem::Formulation{BendersSp}  # subproblem that generated the cut
end

function Base.show(io::IO, err::CutAlreadyInsertedBendersWarning)
    msg = """
    Unexpected constraint state during cut insertion.
    ======
    Cut id: $(err.cut_id).
    The cut is in the master ? $(err.cut_in_master).
    The cut is active ? $(err.cut_is_active).
    ======
    """
    println(io, msg)
end
# Inserts the generated cuts into the master. Cuts whose dual solution is
# already in the subproblem's dual solution pool are only re-activated; new
# cuts are added as `MasterBendCutConstr` constraints and pushed into the pool.
# Returns the ids of all inserted or re-activated cut constraints.
function Benders.insert_cuts!(reform, ctx::BendersContext, cuts)
    master = Benders.get_master(ctx)
    cuts_to_insert = GeneratedCut[]
    cut_ids_to_activate = Set{ConstrId}()
    # First, split the cuts between those already known in a pool (to
    # reactivate) and genuinely new ones (to insert).
    for cut in cuts.cuts
        dual_sol = cut.dual_sol
        spform = getmodel(dual_sol)
        pool = get_dual_sol_pool(spform)
        cut_id = MathProg.get_from_pool(pool, dual_sol)
        if !isnothing(cut_id)
            if haskey(master, cut_id) && !iscuractive(master, cut_id)
                push!(cut_ids_to_activate, cut_id)
            else
                # Pooled cut that is missing from the master or already active:
                # inconsistent state, warn the caller.
                in_master = haskey(master, cut_id)
                is_active = iscuractive(master, cut_id)
                warning = CutAlreadyInsertedBendersWarning(
                    in_master, is_active, cut_id, master, spform
                )
                throw(warning) # TODO: parameter
            end
        else
            push!(cuts_to_insert, cut)
        end
    end
    # NOTE(review): these counters are maintained but not returned — confirm
    # whether they were meant to be part of the output.
    nb_added_cuts = 0
    nb_reactivated_cut = 0
    # Then, we add the new cuts (i.e. not in the pool)
    cut_ids = ConstrId[]
    for cut in cuts_to_insert
        constr = setconstr!(
            master, "Benders", MasterBendCutConstr;
            rhs = cut.rhs,
            members = cut.lhs,
            sense = cut.min_sense ? Greater : Less,
        )
        push!(cut_ids, getid(constr))
        dual_sol = cut.dual_sol
        spform = getmodel(dual_sol)
        pool = get_dual_sol_pool(spform)
        # if store_in_sp_pool
        cut_id = ConstrId(getid(constr); duty = MasterBendCutConstr)
        MathProg.push_in_pool!(pool, dual_sol, cut_id, getvalue(dual_sol))
        # end
        nb_added_cuts += 1
    end
    # Finally, we reactivate the cuts that were already in the pool
    for cut_id in cut_ids_to_activate
        activate!(master, cut_id)
        push!(cut_ids, cut_id)
        nb_reactivated_cut += 1
    end
    return cut_ids
end
# Assembles a primal solution to the original problem from the master primal
# solution (pure master variables only) and the separation subproblem
# solutions. The solution value is the master's value.
function Benders.build_primal_solution(context::BendersContext, mast_primal_sol, sep_sp_sols)
    # Keep BendSpSepVar and MasterPureVar
    var_ids = VarId[]
    var_vals = Float64[]
    for (varid, val) in mast_primal_sol
        if getduty(varid) <= MasterPureVar
            push!(var_ids, varid)
            push!(var_vals, val)
        end
    end
    for sp_sol in sep_sp_sols.sols
        for (varid, val) in sp_sol
            # NOTE(review): the duty filter below is commented out, so ALL
            # subproblem variables are kept — confirm this is intended.
            #if getduty(varid) <= BendSpSepVar
                push!(var_ids, varid)
                push!(var_vals, val)
            #end
        end
    end
    return Coluna.PrimalSolution(
        Benders.get_master(context), # TODO: second stage vars does not belong to the master
        var_ids,
        var_vals,
        getvalue(mast_primal_sol),
        FEASIBLE_SOL
    )
end
"""
Output of the default implementation of an iteration of the Benders algorithm.
It contains:
- `min_sense`: the original problem is a minimization problem
- `nb_new_cuts`: the number of new cuts added to the master problem
- `ip_primal_sol`: the primal solution to the original problem found during this iteration
- `infeasible`: the original problem is infeasible
- `time_limit_reached`: the time limit was reached
- `master`: the solution value to the master problem
"""
struct BendersIterationOutput <: Benders.AbstractBendersIterationOutput
min_sense::Bool
nb_new_cuts::Int
ip_primal_sol::Union{Nothing,PrimalSolution}
infeasible::Bool
time_limit_reached::Bool
master::Union{Nothing,Float64}
end
Benders.benders_iteration_output_type(ctx::BendersContext) = BendersIterationOutput
# Builds the output of a Benders iteration from the values gathered by the
# generic loop.
# NOTE(review): the original computed `infeasible_subproblem = true` when
# `ip_primal_sol` contains artificial variables but never used the flag; the
# dead assignment is removed here (no behavior change) — confirm whether
# `infeasible` was meant to be derived from it.
function Benders.new_iteration_output(
    ::Type{BendersIterationOutput},
    is_min_sense,
    nb_new_cuts,
    ip_primal_sol,
    infeasible,
    time_limit_reached,
    master_value
)
    return BendersIterationOutput(
        is_min_sense,
        nb_new_cuts,
        ip_primal_sol,
        infeasible,
        time_limit_reached,
        master_value
    )
end
"""
Output of the default implementation of the Benders algorithm.
It contains:
- `infeasible`: the original problem is infeasible
- `time_limit_reached`: the time limit was reached
- `mlp`: the final bound obtained with the Benders cut algorithm
- `ip_primal_sol`: the best primal solution to the original problem found by the Benders cut algorithm
"""
struct BendersOutput <: Benders.AbstractBendersOutput
infeasible::Bool
time_limit_reached::Bool
mlp::Union{Nothing, Float64}
ip_primal_sol::Union{Nothing,PrimalSolution}
end
Benders.benders_output_type(::BendersContext) = BendersOutput
function Benders.new_output(
::Type{BendersOutput},
benders_iter_output::BendersIterationOutput
)
return BendersOutput(
benders_iter_output.infeasible,
benders_iter_output.time_limit_reached,
benders_iter_output.master,
benders_iter_output.ip_primal_sol
)
end
Benders.stop_benders(::BendersContext, ::Nothing, benders_iteration) = false
function Benders.stop_benders(ctx::BendersContext, benders_iteration_output::BendersIterationOutput, benders_iteration)
return benders_iteration_output.infeasible ||
benders_iteration_output.time_limit_reached ||
benders_iteration_output.nb_new_cuts <= 0 ||
ctx.nb_benders_iteration_limits <= benders_iteration
end
# No-op hook: the default Benders context does nothing after an iteration; printer
# or debug contexts override this to emit per-iteration output.
# (Non-Julia residue that was fused after the closing `end` has been removed.)
function Benders.after_benders_iteration(::BendersContext, phase, env, iteration, benders_iter_output)
    return
end
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 9257 | """
BendersPrinterContext(reformulation, algo_params) -> BendersPrinterContext
Creates a context to run the default implementation of the Benders algorithm
together with a printer that prints information about the algorithm execution.
"""
mutable struct BendersPrinterContext
    inner::BendersContext      # wrapped default context that does the actual work
    sp_elapsed_time::Float64   # wall-clock time of the last separation-problem solve
    mst_elapsed_time::Float64  # wall-clock time of the last master-problem solve
    # NOTE(review): `print` is not consulted in this file (the iteration summary
    # line is always printed) — confirm intended use.
    print::Bool
    debug_mode::Bool           # aggregate of the debug_print_* flags, computed in the constructor
    debug_print_master::Bool
    debug_print_master_primal_solution::Bool
    debug_print_master_dual_solution::Bool
    debug_print_subproblem::Bool
    debug_print_subproblem_primal_solution::Bool
    debug_print_subproblem_dual_solution::Bool
    debug_print_generated_cuts::Bool
end
# Keyword constructor wrapping a default `BendersContext` with printing options.
function BendersPrinterContext(
    reform, alg;
    print = false,
    debug_print_master = false,
    debug_print_master_primal_solution = false,
    debug_print_master_dual_solution = false,
    debug_print_subproblem = false,
    debug_print_subproblem_primal_solution = false,
    debug_print_subproblem_dual_solution = false,
    debug_print_generated_cuts = false,
)
    # `debug_mode` aggregates the debug flags (it drives the end-of-iteration
    # separator). Bug fix: the original expression omitted
    # `debug_print_master_dual_solution`, so enabling only that flag left
    # `debug_mode` off even though master dual solutions were printed.
    debug_mode = debug_print_master ||
        debug_print_master_primal_solution ||
        debug_print_master_dual_solution ||
        debug_print_subproblem ||
        debug_print_subproblem_primal_solution ||
        debug_print_subproblem_dual_solution ||
        debug_print_generated_cuts
    return BendersPrinterContext(
        BendersContext(reform, alg),
        0.0,
        0.0,
        print,
        debug_mode,
        debug_print_master,
        debug_print_master_primal_solution,
        debug_print_master_dual_solution,
        debug_print_subproblem,
        debug_print_subproblem_primal_solution,
        debug_print_subproblem_dual_solution,
        debug_print_generated_cuts,
    )
end
# Straight delegations to the wrapped default context.
Benders.is_minimization(ctx::BendersPrinterContext) = Benders.is_minimization(ctx.inner)
Benders.get_reform(ctx::BendersPrinterContext) = Benders.get_reform(ctx.inner)
Benders.get_master(ctx::BendersPrinterContext) = Benders.get_master(ctx.inner)
Benders.get_benders_subprobs(ctx::BendersPrinterContext) = Benders.get_benders_subprobs(ctx.inner)
# Solve the master through the inner context, timing the call and optionally
# printing the formulation and its primal/dual solutions between separator lines.
function Benders.optimize_master_problem!(master, ctx::BendersPrinterContext, env)
    if ctx.debug_print_master || ctx.debug_print_master_primal_solution || ctx.debug_print_master_dual_solution
        println(crayon"bold blue", repeat('-', 80), crayon"reset")
    end
    # Record the master solve time for the iteration summary line.
    ctx.mst_elapsed_time = @elapsed begin
        result = Benders.optimize_master_problem!(master, ctx.inner, env)
    end
    if ctx.debug_print_master
        print(crayon"bold underline blue", "Master problem:", crayon"!bold !underline")
        @show master
        print(crayon"reset")
    end
    if ctx.debug_print_master_primal_solution
        print(crayon"bold underline blue", "Master primal solution:", crayon"!bold !underline")
        @show Benders.get_primal_sol(result)
        print(crayon"reset")
    end
    if ctx.debug_print_master_dual_solution
        print(crayon"bold underline blue", "Master dual solution:", crayon"!bold !underline")
        @show Benders.get_dual_sol(result)
        print(crayon"reset")
    end
    if ctx.debug_print_master || ctx.debug_print_master_primal_solution || ctx.debug_print_master_dual_solution
        println(crayon"bold blue", repeat('-', 80), crayon"reset")
    end
    return result
end
# Delegate the unbounded-master treatment to the inner context, printing the
# master and its primal solution when any master debug flag is on.
function Benders.treat_unbounded_master_problem_case!(master, ctx::BendersPrinterContext, env)
    result = Benders.treat_unbounded_master_problem_case!(master, ctx.inner, env)
    if ctx.debug_print_master || ctx.debug_print_master_primal_solution || ctx.debug_print_master_dual_solution
        println(crayon"bold blue", repeat('-', 80), crayon"reset")
        println(crayon"bold underline blue", "Treat unbounded master", crayon"reset")
        @show master
        print(crayon"bold underline blue", "Master primal solution:", crayon"!bold !underline")
        @show Benders.get_primal_sol(result)
        print(crayon"reset")
        println(crayon"bold blue", repeat('-', 80), crayon"reset")
    end
    return result
end
# Straight delegations to the wrapped default context.
Benders.update_sp_rhs!(ctx::BendersPrinterContext, sp, primal_sol) = Benders.update_sp_rhs!(ctx.inner, sp, primal_sol)
Benders.setup_separation_for_unbounded_master_case!(ctx::BendersPrinterContext, sp, primal_sol) = Benders.setup_separation_for_unbounded_master_case!(ctx.inner, sp, primal_sol)
Benders.set_of_cuts(ctx::BendersPrinterContext) = Benders.set_of_cuts(ctx.inner)
Benders.set_of_sep_sols(ctx::BendersPrinterContext) = Benders.set_of_sep_sols(ctx.inner)
# Solve a separation subproblem through the inner context, timing the call and
# optionally printing the subproblem and its primal/dual solutions.
function Benders.optimize_separation_problem!(ctx::BendersPrinterContext, sp::Formulation{BendersSp}, env, unbounded_master)
    if ctx.debug_print_subproblem || ctx.debug_print_subproblem_primal_solution || ctx.debug_print_subproblem_dual_solution
        println(crayon"bold green", repeat('-', 80), crayon"reset")
    end
    if ctx.debug_print_subproblem
        print(crayon"bold underline green", "Separation problem (unbounded master = $unbounded_master):", crayon"!bold !underline")
        @show sp
        print(crayon"reset")
    end
    # Record the separation solve time for the iteration summary line.
    ctx.sp_elapsed_time = @elapsed begin
        result = Benders.optimize_separation_problem!(ctx.inner, sp, env, unbounded_master)
    end
    if ctx.debug_print_subproblem_primal_solution
        print(crayon"bold underline green", "Separation problem primal solution:", crayon"!bold !underline")
        @show Benders.get_primal_sol(result)
        print(crayon"reset")
    end
    if ctx.debug_print_subproblem_dual_solution
        print(crayon"bold underline green", "Separation problem dual solution:", crayon"!bold !underline")
        @show Benders.get_dual_sol(result)
        print(crayon"reset")
    end
    if ctx.debug_print_subproblem || ctx.debug_print_subproblem_primal_solution || ctx.debug_print_subproblem_dual_solution
        println(crayon"bold green", repeat('-', 80), crayon"reset")
    end
    return result
end
# Phase-1 (feasibility) treatment of an infeasible separation problem, with
# optional debug printing around the inner call.
function Benders.treat_infeasible_separation_problem_case!(ctx::BendersPrinterContext, sp, env, unbounded_master_case)
    if ctx.debug_print_subproblem || ctx.debug_print_subproblem_primal_solution || ctx.debug_print_subproblem_dual_solution
        println(crayon"bold green", repeat('-', 80), crayon"reset")
    end
    if ctx.debug_print_subproblem
        print(crayon"bold underline green", "Phase 1 Separation problem (unbounded_master = $unbounded_master_case):", crayon"!bold !underline")
        @show sp
        print(crayon"reset")
    end
    # Bug fix: the original called the inner method twice — once before the debug
    # printing (result immediately discarded) and once here — so the separation
    # problem was solved twice per call. Only the timed call is kept.
    ctx.sp_elapsed_time = @elapsed begin
        result = Benders.treat_infeasible_separation_problem_case!(ctx.inner, sp, env, unbounded_master_case)
    end
    if ctx.debug_print_subproblem_primal_solution
        print(crayon"bold underline green", "Separation problem primal solution:", crayon"!bold !underline")
        @show Benders.get_primal_sol(result)
        print(crayon"reset")
    end
    if ctx.debug_print_subproblem_dual_solution
        print(crayon"bold underline green", "Separation problem dual solution:", crayon"!bold !underline")
        @show Benders.get_dual_sol(result)
        print(crayon"reset")
    end
    if ctx.debug_print_subproblem || ctx.debug_print_subproblem_primal_solution || ctx.debug_print_subproblem_dual_solution
        println(crayon"bold green", repeat('-', 80), crayon"reset")
    end
    return result
end
# Straight delegations to the wrapped default context.
Benders.push_in_set!(ctx::BendersPrinterContext, set, sep_result) = Benders.push_in_set!(ctx.inner, set, sep_result)
# Insert the generated cuts through the inner context and return their ids.
# NOTE(review): `debug_print_generated_cuts` is never consulted anywhere in this
# file; this is presumably where the generated cuts were meant to be printed —
# confirm intent. Also replaced an unused local binding + implicit return with an
# explicit `return`.
function Benders.insert_cuts!(reform, ctx::BendersPrinterContext, cuts)
    return Benders.insert_cuts!(reform, ctx.inner, cuts)
end
Benders.benders_iteration_output_type(ctx::BendersPrinterContext) = Benders.benders_iteration_output_type(ctx.inner)
Benders.stop_benders(ctx::BendersPrinterContext, benders_iter_output, benders_iteration) = Benders.stop_benders(ctx.inner, benders_iter_output, benders_iteration)
Benders.benders_output_type(ctx::BendersPrinterContext) = Benders.benders_output_type(ctx.inner)
# Format the one-line summary of a Benders iteration: iteration number, elapsed
# optimization/master/separation times, number of new cuts, and the master value
# (NaN when the iteration has no master value).
function _benders_iter_str(iteration, benders_iter_output, sp_time::Float64, mst_time::Float64, optim_time::Float64)
    master_val::Float64 = something(benders_iter_output.master, NaN)
    return @sprintf(
        "<it=%3i> <et=%5.2f> <mst=%5.2f> <sp=%5.2f> <cuts=%2i> <master=%10.4f>",
        iteration, optim_time, mst_time, sp_time, benders_iter_output.nb_new_cuts, master_val
    )
end
# Print the iteration summary line and, in debug mode, a red end-of-iteration
# separator. NOTE(review): the context's `print` flag is not consulted here; the
# summary line is always printed — confirm intent.
function Benders.after_benders_iteration(ctx::BendersPrinterContext, phase, env, iteration, benders_iter_output)
    println(_benders_iter_str(iteration, benders_iter_output, ctx.sp_elapsed_time, ctx.mst_elapsed_time, elapsed_optim_time(env)))
    if ctx.debug_mode
        println(crayon"bold red", repeat('-', 30), " end of iteration ", iteration, " ", repeat('-', 30), crayon"reset")
    end
    return
end
# Remaining delegations to the wrapped default context.
function Benders.build_primal_solution(context::BendersPrinterContext, mast_primal_sol, sep_sp_sols)
    return Benders.build_primal_solution(context.inner, mast_primal_sol, sep_sp_sols)
end
Benders.master_is_unbounded(ctx::BendersPrinterContext, second_stage_cost, unbounded_master_case) = Benders.master_is_unbounded(ctx.inner, second_stage_cost, unbounded_master_case)
| Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 1433 | """
Precompute information to speed-up calculation of right-hand side of benders subproblems.
We extract the following information from the subproblems:
- `rhs` (the `a` vectors): the perennial rhs of all subproblem constraints;
- `T` (the `A` matrices): a submatrix of the subproblem coefficient matrix that involves only first stage variables.
"""
struct RhsCalculationHelper
    # Per-subproblem perennial rhs of the active, explicit constraints
    # (collected by `_add_subproblem!`).
    rhs::Dict{FormId, SparseVector{Float64,ConstrId}}
    # Per-subproblem submatrix of the technological constraints restricted to
    # first-stage representative variables.
    T::Dict{FormId, DynamicSparseMatrix{ConstrId,VarId,Float64}}
end
# Register subproblem `spid`: store the perennial rhs of its active explicit
# constraints into `rhs[spid]`, and the technological-constraint submatrix
# restricted to first-stage representative variables into `T[spid]`.
function _add_subproblem!(rhs, T, spid, sp)
    @assert !haskey(rhs, spid) && !haskey(T, spid)
    ids = ConstrId[]
    vals = Float64[]
    for (cid, constr) in getconstrs(sp)
        # Only active, explicit constraints contribute to the rhs vector.
        iscuractive(sp, constr) && isexplicit(sp, constr) || continue
        push!(ids, cid)
        push!(vals, getperenrhs(sp, cid))
    end
    rhs[spid] = sparsevec(ids, vals, Coluna.MAX_NB_ELEMS)
    T[spid] = _submatrix(
        sp,
        (_, constr_id, _) -> getduty(constr_id) <= BendSpTechnologicalConstr,
        (_, var_id, _) -> getduty(var_id) <= BendSpFirstStageRepVar
    )
    return
end
# Build the helper by registering every Benders separation subproblem of `reform`.
# (Non-Julia residue fused after the closing `end` has been removed.)
function RhsCalculationHelper(reform)
    rhs = Dict{FormId, SparseVector{Float64,ConstrId}}()
    T = Dict{FormId, DynamicSparseMatrix{ConstrId,VarId,Float64}}()
    for (spid, sp) in get_benders_sep_sps(reform)
        _add_subproblem!(rhs, T, spid, sp)
    end
    return RhsCalculationHelper(rhs, T)
end
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 12608 | ############################################################################################
# AbstractConquerInput implementation for the strong branching.
############################################################################################
"Conquer input object created by the strong branching algorithm."
struct ConquerInputFromSb <: AbstractConquerInput
global_primal_handler::GlobalPrimalBoundHandler
children_candidate::SbNode
children_units_to_restore::UnitsUsage
end
get_conquer_input_ip_dual_bound(i::ConquerInputFromSb) = get_ip_dual_bound(i.children_candidate.optstate)
get_global_primal_handler(i::ConquerInputFromSb) = i.global_primal_handler
get_node_depth(i::ConquerInputFromSb) = i.children_candidate.depth
get_units_to_restore(i::ConquerInputFromSb) = i.children_units_to_restore
############################################################################################
# NoBranching
############################################################################################
"""
Divide algorithm that does nothing. It does not generate any child.
"""
struct NoBranching <: AlgoAPI.AbstractDivideAlgorithm end
function run!(::NoBranching, ::Env, reform::Reformulation, ::Branching.AbstractDivideInput)
return DivideOutput([])
end
############################################################################################
# Branching API implementation for the (classic) branching
############################################################################################
"""
ClassicBranching(
selection_criterion = MostFractionalCriterion()
rules = [Branching.PrioritisedBranchingRule(SingleVarBranchingRule(), 1.0, 1.0)]
int_tol = 1e-6
)
Chooses the best candidate according to a selection criterion and generates the two children.
**Parameters**
- `selection_criterion`: selection criterion to choose the best candidate
- `rules`: branching rules to generate the candidates
- `int_tol`: tolerance to determine if a variable is integer
It is implemented as a specific case of the strong branching algorithm.
"""
struct ClassicBranching <: AlgoAPI.AbstractDivideAlgorithm
selection_criterion::Branching.AbstractSelectionCriterion
rules::Vector{Branching.PrioritisedBranchingRule}
int_tol::Float64
ClassicBranching(;
selection_criterion = MostFractionalCriterion(),
rules = [Branching.PrioritisedBranchingRule(SingleVarBranchingRule(), 1.0, 1.0)],
int_tol = 1e-6
) = new(selection_criterion, rules, int_tol)
end
struct BranchingContext{SelectionCriterion<:Branching.AbstractSelectionCriterion} <: Branching.AbstractBranchingContext
selection_criterion::SelectionCriterion
rules::Vector{Branching.PrioritisedBranchingRule}
max_nb_candidates::Int
int_tol::Float64
end
branching_context_type(::ClassicBranching) = BranchingContext
Branching.get_selection_nb_candidates(ctx::BranchingContext) = ctx.max_nb_candidates
function new_context(::Type{<:BranchingContext}, algo::ClassicBranching, _)
return BranchingContext(algo.selection_criterion, algo.rules, 1, algo.int_tol)
end
Branching.get_int_tol(ctx::BranchingContext) = ctx.int_tol
Branching.get_selection_criterion(ctx::BranchingContext) = ctx.selection_criterion
Branching.get_rules(ctx::BranchingContext) = ctx.rules
# Return `true` when at least one Dantzig-Wolfe pricing subproblem has an upper
# multiplicity greater than 1, i.e. the reformulation involves identical
# subproblems. (Removed dead locals: the lower-multiplicity constraint id and
# `lb` were computed but never used.)
function _has_identical_sps(master::Formulation{DwMaster}, reform::Reformulation)
    for (_, sp) in get_dw_pricing_sps(reform)
        # The upper multiplicity is the rhs of the dedicated master constraint.
        ub = getcurrhs(master, sp.duty_data.upper_multiplicity_constr_id)
        if ub > 1
            return true
        end
    end
    return false
end
# Warn the user when branching found no candidate although the situation is
# explainable: the projected (original) solution is integral while the extended
# (master) solution is not, and the reformulation has identical subproblems.
function _why_no_candidate(master::Formulation{DwMaster}, reform, input, extended_sol, original_sol)
    integer_orig_sol = isinteger(original_sol)
    integer_ext_sol = isinteger(extended_sol)
    identical_sp = _has_identical_sps(master, reform)
    if integer_orig_sol && !integer_ext_sol && identical_sp
        message = """
        The solution to the master is not integral and the projection on the original variables is integral.
        Your reformulation involves subproblems with upper multiplicity greater than 1.
        Column generation algorithm could not create an integral solution to the master using the column generated.
        In order to generate columns that can lead to an integral solution, you may have to use a branching scheme that changes the structure of the subproblems.
        This is not provided by the default implementation of the branching algorithm in the current version of Coluna.
        """
        @warn message
    end
    return nothing
end
# Benders reformulations: no diagnostic message.
function _why_no_candidate(::Formulation{BendersMaster}, reform, input, extended_sol, original_sol)
    return nothing
end
# Dispatch the diagnostic on the type of the master formulation.
function Branching.why_no_candidate(reform::Reformulation, input, extended_sol, original_sol)
    master = getmaster(reform)
    return _why_no_candidate(master, reform, input, extended_sol, original_sol)
end
Branching.new_divide_output(children::Vector{SbNode}) = DivideOutput(children)
Branching.new_divide_output(::Nothing) = DivideOutput(SbNode[])
############################################################################################
# Branching API implementation for the strong branching
############################################################################################
"""
BranchingPhase(max_nb_candidates, conquer_algo, score)
Define a phase in strong branching. It contains the maximum number of candidates
to evaluate, the conquer algorithm which does evaluation, and the score used to sort the
candidates.
"""
struct BranchingPhase
max_nb_candidates::Int64
conquer_algo::AbstractConquerAlgorithm
score::Branching.AbstractBranchingScore
end
"""
StrongBranching(
phases = [],
rules = [Branching.PrioritisedBranchingRule(SingleVarBranchingRule(), 1.0, 1.0)],
selection_criterion = MostFractionalCriterion(),
verbose = true,
int_tol = 1e-6
)
The algorithm that performs a (multi-phase) (strong) branching in a tree search algorithm.
Strong branching is a procedure that heuristically selects a branching constraint that
potentially gives the best progress of the dual bound. The procedure selects a collection
of branching candidates based on their branching rule and their score.
Then, the procedure evaluates the progress of the dual bound in both branches of each branching
candidate by solving both potential children using a conquer algorithm.
The candidate that has the largest product of dual bound improvements in the branches
is chosen to be the branching constraint.
When the dual bound improvement produced by the branching constraint is difficult to compute
(e.g. time-consuming in the context of column generation), one can let the branching algorithm
quickly estimate the dual bound improvement of each candidate and retain the most promising
branching candidates. This is called a **phase**. The goal is to first evaluate a large number
of candidates with a very fast conquer algorithm and retain a certain number of promising ones.
Then, over the phases, it evaluates the improvement with a more precise conquer algorithm and
restrict the number of retained candidates until only one is left.
**Parameters**:
- `phases`: a vector of [`Coluna.Algorithm.BranchingPhase`](@ref)
- `rules`: a vector of [`Coluna.Algorithm.Branching.PrioritisedBranchingRule`](@ref)
- `selection_criterion`: a selection criterion to choose the initial candidates
- `verbose`: if true, print the progress of the strong branching procedure
- `int_tol`: tolerance to determine if a variable is integer
"""
struct StrongBranching <: AlgoAPI.AbstractDivideAlgorithm
phases::Vector{BranchingPhase}
rules::Vector{Branching.PrioritisedBranchingRule}
selection_criterion::Branching.AbstractSelectionCriterion
verbose::Bool
int_tol::Float64
StrongBranching(;
phases = [],
rules = [],
selection_criterion = MostFractionalCriterion(),
verbose = true,
int_tol = 1e-6
) = new(phases, rules, selection_criterion, verbose, int_tol)
end
## Implementation of Algorithm API.
# StrongBranching does not use any storage unit itself,
# therefore get_units_usage() is not defined for it

# Map each phase's conquer algorithm and each branching rule of `algo` to a
# named child algorithm ("phase$i" / "rule$i"), paired with the reformulation.
function get_child_algorithms(algo::StrongBranching, reform::Reformulation)
    children = Dict()
    foreach(enumerate(algo.phases)) do (i, phase)
        children["phase$i"] = (phase.conquer_algo, reform)
    end
    foreach(enumerate(algo.rules)) do (i, prioritised_rule)
        children["rule$i"] = (prioritised_rule.rule, reform)
    end
    return children
end
# Implementation of the strong branching API.
# Runtime context of one strong-branching phase: its parameters plus the storage
# units its conquer algorithm needs restored before each child evaluation.
struct StrongBranchingPhaseContext <: Branching.AbstractStrongBrPhaseContext
    phase_params::BranchingPhase
    units_to_restore_for_conquer::UnitsUsage
end
Branching.get_score(ph::StrongBranchingPhaseContext) = ph.phase_params.score
Branching.get_conquer(ph::StrongBranchingPhaseContext) = ph.phase_params.conquer_algo
Branching.get_units_to_restore_for_conquer(ph::StrongBranchingPhaseContext) = ph.units_to_restore_for_conquer
Branching.get_max_nb_candidates(ph::StrongBranchingPhaseContext) = ph.phase_params.max_nb_candidates
# Collect the units the phase's conquer algorithm must restore, then wrap them
# with the phase parameters.
function new_phase_context(::Type{StrongBranchingPhaseContext}, phase::BranchingPhase, reform, _)
    units_to_restore_for_conquer = collect_units_to_restore!(phase.conquer_algo, reform)
    return StrongBranchingPhaseContext(phase, units_to_restore_for_conquer)
end
# Runtime context of the whole strong-branching algorithm.
struct StrongBranchingContext{
    PhaseContext<:Branching.AbstractStrongBrPhaseContext,
    SelectionCriterion<:Branching.AbstractSelectionCriterion
} <: Branching.AbstractStrongBrContext
    phases::Vector{PhaseContext}
    rules::Vector{Branching.PrioritisedBranchingRule}
    selection_criterion::SelectionCriterion
    int_tol::Float64
end
# The first phase determines how many candidates are initially selected.
Branching.get_selection_nb_candidates(ctx::StrongBranchingContext) = Branching.get_max_nb_candidates(first(ctx.phases))
Branching.get_rules(ctx::StrongBranchingContext) = ctx.rules
Branching.get_selection_criterion(ctx::StrongBranchingContext) = ctx.selection_criterion
Branching.get_int_tol(ctx::StrongBranchingContext) = ctx.int_tol
Branching.get_phases(ctx::StrongBranchingContext) = ctx.phases
# In verbose mode, wrap the context (and its phases) with printer decorators.
function branching_context_type(algo::StrongBranching)
    select_crit_type = typeof(algo.selection_criterion)
    if algo.verbose
        return BranchingPrinter{StrongBranchingContext{PhasePrinter{StrongBranchingPhaseContext},select_crit_type}}
    end
    return StrongBranchingContext{StrongBranchingPhaseContext,select_crit_type}
end
# Build the context, rejecting configurations with no rule or no phase.
function new_context(
    ::Type{StrongBranchingContext{PhaseContext, SelectionCriterion}}, algo::StrongBranching, reform
) where {PhaseContext<:Branching.AbstractStrongBrPhaseContext,SelectionCriterion<:Branching.AbstractSelectionCriterion}
    if isempty(algo.rules)
        error("Strong branching: no branching rule is defined.")
    end
    if isempty(algo.phases)
        error("Strong branching: no branching phase is defined.")
    end
    phases = map(((i, phase),) -> new_phase_context(PhaseContext, phase, reform, i), enumerate(algo.phases))
    return StrongBranchingContext(
        phases, algo.rules, algo.selection_criterion, algo.int_tol
    )
end
# Evaluate one child node of a strong-branching candidate with the phase's
# conquer algorithm, feeding any primal solutions found into the shared
# incumbent handler.
function Branching.eval_child_of_candidate!(child, phase::Branching.AbstractStrongBrPhaseContext, env, reform, input)
    child_state = OptimizationState(getmaster(reform))
    child.conquer_output = child_state
    global_primal_handler = Branching.get_global_primal_handler(input)
    # Seed the state with the current incumbent so the gap check can skip the
    # (expensive) conquer when the node is already closed.
    update_ip_primal_bound!(child_state, get_global_primal_bound(global_primal_handler))
    if !ip_gap_closed(child_state)
        units_to_restore = Branching.get_units_to_restore_for_conquer(phase)
        restore_from_records!(units_to_restore, child.records)
        conquer_input = ConquerInputFromSb(global_primal_handler, child, units_to_restore)
        child.conquer_output = run!(Branching.get_conquer(phase), env, reform, conquer_input)
        child.ip_dual_bound = get_lp_dual_bound(child.conquer_output)
        for sol in get_ip_primal_sols(child.conquer_output)
            store_ip_primal_sol!(global_primal_handler, sol)
        end
        TreeSearch.set_records!(child, create_records(reform))
    end
    # Store new primal solutions found during the evaluation of the child.
    # NOTE(review): `child_state` only received a primal *bound* above (no
    # solutions), so this loop looks like a no-op; solutions are collected from
    # `child.conquer_output` inside the branch — confirm intent.
    for sol in get_ip_primal_sols(child_state)
        store_ip_primal_sol!(global_primal_handler, sol)
    end
    return
end
| Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 1350 | struct DivideOutput{N} <: Branching.AbstractDivideOutput
children::Vector{N}
end
Branching.get_children(output::DivideOutput) = output.children
# Best LP primal solution of the conquer state, expressed in master variables.
function get_extended_sol(reform, opt_state)
    return get_best_lp_primal_sol(opt_state)
end
# Projection of the extended solution onto the original variables when the
# master supports projection; otherwise the extended solution itself.
# Returns `nothing` when the conquer state has no LP primal solution.
function get_original_sol(reform, opt_state)
    extended_sol = get_best_lp_primal_sol(opt_state)
    master = getmaster(reform)
    original_sol = nothing
    if !isnothing(extended_sol)
        original_sol = if MathProg.projection_is_possible(master)
            proj_cols_on_rep(extended_sol)
        else
            get_best_lp_primal_sol(opt_state) # it means original_sol equals extended_sol(requires discussion)
        end
    end
    return original_sol
end
function Branching.projection_on_master_is_possible(::Branching.AbstractDivideContext, reform)
    return MathProg.projection_is_possible(getmaster(reform))
end
# Entry point of divide algorithms: build the algorithm's context, extract the
# extended/original solutions from the conquer output, and delegate to the
# generic branching loop. (Non-Julia residue fused after `end` removed.)
function run!(algo::AlgoAPI.AbstractDivideAlgorithm, env::Env, reform::Reformulation, input::Branching.AbstractDivideInput)
    ctx = new_context(branching_context_type(algo), algo, reform)
    conquer_opt_state = Branching.get_conquer_opt_state(input)
    extended_sol = get_extended_sol(reform, conquer_opt_state)
    original_sol = get_original_sol(reform, conquer_opt_state)
    return Branching.run_branching!(ctx, env, reform, input, extended_sol, original_sol)
end
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 2672 | struct BranchingPrinter{StrongBrContext<:Branching.AbstractStrongBrContext} <: Branching.AbstractStrongBrContext
inner::StrongBrContext
end
# Delegations from the printing decorator to the wrapped strong-branching context.
Branching.get_rules(ctx::BranchingPrinter) = Branching.get_rules(ctx.inner)
Branching.get_int_tol(ctx::BranchingPrinter) = Branching.get_int_tol(ctx.inner)
Branching.get_selection_criterion(ctx::BranchingPrinter) = Branching.get_selection_criterion(ctx.inner)
Branching.get_selection_nb_candidates(ctx::BranchingPrinter) = Branching.get_selection_nb_candidates(ctx.inner)
Branching.get_phases(ctx::BranchingPrinter) = Branching.get_phases(ctx.inner)
# Printing decorator around one strong-branching phase context.
struct PhasePrinter{PhaseContext<:Branching.AbstractStrongBrPhaseContext} <: Branching.AbstractStrongBrPhaseContext
    inner::PhaseContext
    phase_index::Int
end
Branching.get_max_nb_candidates(ctx::PhasePrinter) = Branching.get_max_nb_candidates(ctx.inner)
Branching.get_score(ctx::PhasePrinter) = Branching.get_score(ctx.inner)
# Build the inner context, then wrap it with the printer.
function new_context(
    ::Type{BranchingPrinter{StrongBrContext}}, algo::AlgoAPI.AbstractDivideAlgorithm, reform
) where {StrongBrContext<:Branching.AbstractStrongBrContext}
    inner_ctx = new_context(StrongBrContext, algo, reform)
    return BranchingPrinter(inner_ctx)
end
function new_phase_context(
    ::Type{PhasePrinter{PhaseContext}}, phase, reform, phase_index
) where {PhaseContext<:Branching.AbstractStrongBrPhaseContext}
    inner_ctx = new_phase_context(PhaseContext, phase, reform, phase_index)
    return PhasePrinter(inner_ctx, phase_index)
end
# Run the phase through the inner implementation, then print one line per
# candidate with its children's LP bounds and its score.
function Branching.perform_branching_phase!(candidates, cand_children, phase::PhasePrinter, env, reform, input)
    println("**** Strong branching phase ", phase.phase_index, " is started *****");
    scores = Branching.perform_branching_phase_inner!(cand_children, phase, env, reform, input)
    for (candidate, children, score) in Iterators.zip(candidates, cand_children, scores)
        @printf "SB phase %i branch on %+10s" phase.phase_index Branching.getdescription(candidate)
        @printf " (lhs=%.4f) : [" Branching.get_lhs(candidate)
        for (node_index, node) in enumerate(children)
            node_index > 1 && print(",")
            @printf "%10.4f" getvalue(get_lp_primal_bound(node.conquer_output))
        end
        @printf "], score = %10.4f\n" score
    end
    return scores
end
# NOTE(review): this 4-argument delegation does not match the 5-argument
# `eval_child_of_candidate!(child, phase, env, reform, input)` API defined in
# this file; 5-argument calls dispatch to the generic method instead, making
# this delegation appear stale — confirm.
Branching.eval_child_of_candidate!(node, phase::PhasePrinter, env, reform) =
    Branching.eval_child_of_candidate!(node, phase.inner, env, reform)
Branching.get_units_to_restore_for_conquer(phase::PhasePrinter) =
    Branching.get_units_to_restore_for_conquer(phase.inner)
# Delegation to the wrapped phase context. (Non-Julia residue that was fused
# after this definition has been removed.)
Branching.get_conquer(phase::PhasePrinter) = Branching.get_conquer(phase.inner)
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 1277 |
### WIP
### Node for the strong branching (Goal: decouple strong branching from tree search)
### TODO: transform into a very light node dedicated to the strong branching algorithm.
### This light node will contain information to generate the real node of the tree search.
mutable struct SbNode <: TreeSearch.AbstractNode
    depth::Int
    # Receives the current incumbent primal bound of the B&B tree and is updated with
    # the output of the conquer algorithms called by the strong branching.
    # This information is printed by the StrongBranchingPrinter, and is transferred
    # to the B&B algorithm when the tree-search node is instantiated.
    conquer_output::Union{Nothing, OptimizationState}
    branchdescription::String
    ip_dual_bound::Bound
    records::Records
    # `conquer_output` starts as `nothing`; it is filled by the evaluation.
    function SbNode(
        depth, branch_description::String, ip_dual_bound::Bound, records::Records
    )
        return new(depth, nothing, branch_description, ip_dual_bound, records)
    end
end
getdepth(n::SbNode) = n.depth
TreeSearch.set_records!(n::SbNode, records) = n.records = records
TreeSearch.get_branch_description(n::SbNode) = n.branchdescription
# Strong-branching nodes are never the root of the tree search.
TreeSearch.isroot(n::SbNode) = false
# Forward to the tree-search definition (always false for strong-branching
# nodes). Non-Julia residue fused after this definition has been removed.
Branching.isroot(n::SbNode) = TreeSearch.isroot(n)
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 4356 | struct ProductScore <: Branching.AbstractBranchingScore end
# Score a candidate by the product of its children's dual-bound improvements
# (see `_product_score`).
function Branching.compute_score(::ProductScore, children, input)
    parent = Branching.get_conquer_opt_state(input)
    parent_lp_dual_bound = get_lp_dual_bound(parent)
    parent_ip_primal_bound = get_ip_primal_bound(parent)
    # `Ref(...)` shields the field name from broadcasting; equivalent to the bare
    # `Symbol` used in the `TreeDepthScore` method below (Symbols broadcast as scalars).
    children_lp_primal_bounds = get_lp_primal_bound.(getfield.(children, Ref(:conquer_output)))
    return _product_score(parent_lp_dual_bound, parent_ip_primal_bound, children_lp_primal_bounds)
end
# Score a candidate by the estimated depth of the resulting subtree
# (see `_tree_depth_score`).
struct TreeDepthScore <: Branching.AbstractBranchingScore end
function Branching.compute_score(::TreeDepthScore, children, input)
    parent = Branching.get_conquer_opt_state(input)
    parent_lp_dual_bound = get_lp_dual_bound(parent)
    parent_ip_primal_bound = get_ip_primal_bound(parent)
    children_lp_primal_bounds = get_lp_primal_bound.(getfield.(children, :conquer_output))
    return _tree_depth_score(parent_lp_dual_bound, parent_ip_primal_bound, children_lp_primal_bounds)
end
# Product score of a branching candidate: the product of the (capped) dual-bound
# improvements (`delta`s) of its children relative to the parent's dual bound.
# Larger products indicate candidates that tighten both branches.
# TODO : unit tests
function _product_score(
    parent_lp_dual_bound,
    parent_ip_primal_bound,
    children_lp_primal_bounds::Vector
)
    # TO DO : we need to mesure the gap to the cut-off value
    parent_delta = ColunaBase.diff(parent_ip_primal_bound, parent_lp_dual_bound)
    all_branches_above_delta = true
    deltas = zeros(Float64, length(children_lp_primal_bounds))
    for (i, child_lp_primal_bound) in enumerate(children_lp_primal_bounds)
        # Improvement of the child's LP bound over the parent's dual bound.
        node_delta = ColunaBase.diff(child_lp_primal_bound, parent_lp_dual_bound)
        if node_delta < parent_delta
            all_branches_above_delta = false
        end
        deltas[i] = node_delta
    end
    score = 1.0
    if isempty(deltas)
        # No child: fall back to the squared parent gap.
        score = parent_delta * parent_delta
    elseif length(deltas) == 1
        score = parent_delta
    else
        # Multiply the two smallest deltas (capped by the parent gap); additional
        # children contribute only their relative improvement.
        sort!(deltas)
        for (delta_index, node_delta) in enumerate(deltas)
            if node_delta > parent_delta && (!all_branches_above_delta || delta_index > 2)
                node_delta = parent_delta
            end
            node_delta = max(node_delta, 1e-6) # TO DO : use tolerance here
            if (delta_index <= 2)
                score *= node_delta
            else
                score *= node_delta / parent_delta
            end
        end
    end
    return score
end
# TODO : this method needs code documentation & context
# TODO ; unit tests
# Estimate the number of leaves `m` of a branching subtree by bisection: solve
# `sum(m^(-delta/gap) for delta in deltas) == 1` for `m` in [0, 1e20].
# Returns -1 when the bracket escapes towards the upper bound (no finite root).
function _number_of_leaves(gap::Float64, deltas::Vector{Float64})
    lo = 0.0
    hi = 1e20
    mid = 0.0
    for _ in 1:100
        mid = (lo + hi) / 2.0
        # Stop once the bracket is tight relative to its upper end.
        hi - lo < hi / 1000000 && break
        total = mapreduce(delta -> mid^(-delta / gap), +, deltas; init = 0.0)
        if total < 1.0
            hi = mid
        else
            lo = mid
        end
        mid > 0.999e20 && return -1
    end
    return mid
end
# Tree-depth score of a branching candidate: minus the (log-scaled) estimated
# depth of the subtree implied by the children's dual-bound improvements.
# Higher (less negative) is better; -Inf marks candidates with no improvement.
# TODO : this method needs unit tests
function _tree_depth_score(
    parent_lp_dual_bound,
    parent_ip_primal_bound,
    children_lp_primal_bounds
)
    nb_children = length(children_lp_primal_bounds)
    if iszero(nb_children)
        return 0.0
    end
    # TO DO : we need to mesure the gap to the cut-off value
    parent_delta = ColunaBase.diff(parent_ip_primal_bound, parent_lp_dual_bound)
    deltas = zeros(Float64, nb_children)
    nb_zero_deltas = 0
    for (i, child_lp_primal_bound) in enumerate(children_lp_primal_bounds)
        node_delta = ColunaBase.diff(child_lp_primal_bound, parent_lp_dual_bound)
        if node_delta < 1e-6 # TO DO : use tolerance here
            nb_zero_deltas += 1
        end
        # Children cannot improve beyond the parent gap.
        deltas[i] = min(parent_delta, node_delta)
    end
    max_delta = maximum(deltas)
    # Cap the parent gap so one huge gap does not dwarf all improvements.
    if nb_zero_deltas < length(deltas) && parent_delta > max_delta * 30
        parent_delta = max_delta * 30
    end
    score = 0.0
    if nb_zero_deltas == length(deltas)
        # No child improved the bound at all: worst possible score.
        score = -Inf
    elseif length(deltas) == 1
        score = -parent_delta / deltas[1]
    else
        # Depth estimate from the bisection-based leaf count.
        numleaves = _number_of_leaves(parent_delta, deltas)
        if numleaves < 0
            score = -Inf
        else
            score = -log(numleaves) / log(length(deltas))
        end
    end
    return score
end
| Coluna | https://github.com/atoptima/Coluna.jl.git |
|
[
"MPL-2.0"
] | 0.8.1 | 828c61e9434b6af5f7908e42aacd17de35f08482 | code | 2161 |
# Criterion 1
"""
Select the branching candidates that have been generated first (sort by `local_id`).
"""
struct FirstFoundCriterion <: Branching.AbstractSelectionCriterion end
function Branching.select_candidates!(
    candidates::Vector{C}, ::FirstFoundCriterion, max_nb_candidates::Int
) where {C <: Branching.AbstractBranchingCandidate}
    # Sort in place, then truncate to the best `max_nb_candidates`.
    sort!(candidates, by = c -> Branching.get_local_id(c))
    if length(candidates) > max_nb_candidates
        resize!(candidates, max_nb_candidates)
    end
    return candidates
end
# Criterion 2
"""
Select the most fractional branching candidates.
"""
struct MostFractionalCriterion <: Branching.AbstractSelectionCriterion end
function Branching.select_candidates!(
    candidates::Vector{C}, ::MostFractionalCriterion, max_nb_candidates::Int
) where {C <: Branching.AbstractBranchingCandidate}
    # Largest distance to the nearest integer first.
    sort!(candidates, rev = true, by = c -> dist_to_int(Branching.get_lhs(c)))
    if length(candidates) > max_nb_candidates
        resize!(candidates, max_nb_candidates)
    end
    return candidates
end
# Criterion 3
"""
Select the least fractional branching candidates
"""
struct LeastFractionalCriterion <: Branching.AbstractSelectionCriterion end
function Branching.select_candidates!(
    candidates::Vector{C}, ::LeastFractionalCriterion, max_nb_candidates::Int
) where {C <: Branching.AbstractBranchingCandidate}
    # Smallest distance to the nearest integer first.
    sort!(candidates, by = c -> dist_to_int(Branching.get_lhs(c)))
    if length(candidates) > max_nb_candidates
        resize!(candidates, max_nb_candidates)
    end
    return candidates
end
# Criterion 4
"""
Select the candidate with the smallest distance to the closest non-zero integer (often used in diving).
"""
struct ClosestToNonZeroIntegerCriterion <: Branching.AbstractSelectionCriterion end
function Branching.select_candidates!(
    candidates::Vector{C}, ::ClosestToNonZeroIntegerCriterion, max_nb_candidates::Int
) where {C <: Branching.AbstractBranchingCandidate}
    sort!(candidates, by = c -> dist_to_non_zero_int(Branching.get_lhs(c)))
    if length(candidates) > max_nb_candidates
        resize!(candidates, max_nb_candidates)
    end
    return candidates
end
| Coluna | https://github.com/atoptima/Coluna.jl.git |
Subsets and Splits