licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.1.0 | 42d75e0b4f7cbdc29911175ddaa61f14293c6f19 | code | 1910 | # Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Niccolò Antonello <[email protected]>
export RightStringWeight
"""
`RightStringWeight(x)`
| Set | ``\\oplus`` | ``\\otimes`` | ``\\bar{0}`` | ``\\bar{1}`` |
|:-----------------------:|:----------------------:|:--------------:|:------------:|:------------:|
|``L^*\\cup\\{\\infty\\}``| longest common suffix | ``\\cdot`` |``\\infty`` |``\\epsilon`` |
where ``L^*`` is Kleene closure of the set of characters ``L`` and ``\\epsilon`` the empty string.
"""
# Weight of the right string semiring: a string value plus a flag marking
# the zero element 0̄ (∞), which cannot be represented by a string alone.
struct RightStringWeight <: Semiring
x::String
iszero::Bool    # true marks the 0̄ element; `x` is meaningless in that case
end
# convenience constructor: a plain string is never the 0̄ element
RightStringWeight(x::String) = RightStringWeight(x,false)
# reversing a right string weight yields a left string weight (suffixes
# become prefixes)
reversetype(::Type{S}) where {S <: RightStringWeight} = LeftStringWeight
zero(::Type{RightStringWeight}) = RightStringWeight("",true)    # 0̄ (∞)
one(::Type{RightStringWeight}) = RightStringWeight("",false)    # 1̄ (ϵ, empty string)
# Longest common suffix: the ⊕ operation of the right string semiring.
# 0̄ (iszero == true) is the identity of ⊕: lcs(0̄,b) = b and lcs(a,0̄) = a.
function lcs(a::RightStringWeight,b::RightStringWeight)
    if a.iszero
        return b
    elseif b.iszero
        return a
    else
        # count matching *characters* from the end; Iterators.reverse
        # avoids allocating reversed copies of both strings
        cnt = 0
        for (ca,cb) in zip(Iterators.reverse(a.x),Iterators.reverse(b.x))
            if ca == cb
                cnt += 1
            else
                break
            end
        end
        # `last` counts characters, not bytes, so this is correct for
        # multi-byte (non-ASCII) strings, where `a.x[end-cnt+1:end]`
        # would throw a StringIndexError or split a code point
        return RightStringWeight(last(a.x, cnt))
    end
end
# ⊗: string concatenation; 0̄ is an annihilator for ⊗.
# `||` short-circuits (the original `|` always evaluated both operands).
*(a::T, b::T) where {T<:RightStringWeight} =
    (a.iszero || b.iszero) ? zero(T) : T(a.x * b.x)
# ⊕: longest common suffix
+(a::RightStringWeight, b::RightStringWeight) = lcs(a,b)
# Right division: remove the suffix `b` from `a`.
# Throws on division by 0̄; 0̄ divided by anything else is 0̄.
function /(a::RightStringWeight, b::RightStringWeight)
    if b == zero(RightStringWeight)
        throw(ErrorException("Cannot divide by zero"))
    elseif a == zero(RightStringWeight)
        return a
    end
    # drop length(b.x) *characters* from the end; byte-index slicing
    # (`a.x[1:end-length(b.x)]`) would be wrong for non-ASCII strings
    return RightStringWeight(first(a.x, length(a.x)-length(b.x)))
end
# map into the left string semiring by reversing the stored string
reverse(a::RightStringWeight) = LeftStringWeight(reverse(a.x),a.iszero)
#properties
isidempotent(::Type{W}) where {W <: RightStringWeight} = true   # a ⊕ a = a
isright(::Type{W}) where {W <: RightStringWeight}= true
isweaklydivisible(::Type{W}) where {W <: RightStringWeight}= true
| FiniteStateTransducers | https://github.com/idiap/FiniteStateTransducers.jl.git |
|
[
"MIT"
] | 0.1.0 | 42d75e0b4f7cbdc29911175ddaa61f14293c6f19 | code | 2005 | # Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Niccolò Antonello <[email protected]>
# Semiring definitions
# Root type of all weight semirings; the generic fallbacks below assume
# concrete subtypes store their value in a field `x`.
abstract type Semiring end
import Base: zero, one, +, *, /
import Base: isapprox, parse, reverse, convert, get
# generic fallbacks, overridden by specific semirings where needed
isapprox(a::T,b::T) where { T <: Semiring } = isapprox(a.x,b.x)
reversetype(::Type{S}) where {S <: Semiring} = S   # type returned by reverse(a)
reverse(a::S) where { S <: Semiring } = a          # most semirings are self-reverse
reverseback(a::S) where { S <: Semiring } = a
get(a::S) where { S <: Semiring } = a.x            # unwrap the raw value
# properties
"""
`isleft(::Type{W})`
Check if the semiring type `W` satisfies:
``\\forall a,b,c \\in \\mathbb{W} : c \\otimes(a \\oplus b) = c \\otimes a \\oplus c \\otimes b``
"""
isleft(::Type{W}) where {W} = false # ∀ a,b,c: c*(a+b) = c*a + c*b
"""
`isright(::Type{W})`
Check if the semiring type `W` satisfies:
``\\forall a,b,c \\in \\mathbb{W} : c \\otimes(a \\oplus b) = a \\otimes c \\oplus b \\otimes c``
"""
isright(::Type{W}) where {W} = false # ∀ a,b,c: c*(a+b) = a*c + b*c
"""
`isweaklydivisible(::Type{W})`
Check if the semiring type `W` satisfies:
``\\forall a,b \\in \\mathbb{W} \\ \\text{s.t.} \\ a \\oplus b \\neq \\bar{0} \\ \\exists z : x = (x \\oplus y ) \\otimes z``
"""
isweaklydivisible(::Type{W}) where {W} = false # a+b ≂̸ 0: ∃ z s.t. x = (x+y)*z
"""
`ispath(::Type{W})`
Check if the semiring type `W` satisfies:
``\\forall a,b \\in \\mathbb{W}: a \\oplus b = a \\lor a \\oplus b = b``
"""
ispath(::Type{W}) where {W} = false # ∀ a,b: a+b = a or a+b=b
"""
`isidempotent(::Type{W})`
Check if the semiring type `W` satisfies:
``\\forall a \\in \\mathbb{W}: a \\oplus a = a``
"""
isidempotent(::Type{W}) where {W} = false # ∀ a: a+a = a
"""
`iscommulative(::Type{W})`
Check if the semiring type `W` satisfies:
``\\forall a,b \\in \\mathbb{W}: a \\otimes b = b \\otimes a``
"""
iscommulative(::Type{W}) where {W} = false # ∀ a,b: a*b = b*a
iscomplete(::Type{W}) where {W} = false
Base.show(io::IO, T::Semiring) = Base.show(io, T.x)
| FiniteStateTransducers | https://github.com/idiap/FiniteStateTransducers.jl.git |
|
[
"MIT"
] | 0.1.0 | 42d75e0b4f7cbdc29911175ddaa61f14293c6f19 | code | 1475 | # Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Niccolò Antonello <[email protected]>
export TropicalWeight
"""
`TropicalWeight(x)`
| Set | ``\\oplus``| ``\\otimes`` | ``\\bar{0}`` | ``\\bar{1}`` |
|:-----------------------------------:|:----------:|:--------------:|:------------:|:------------:|
|``\\mathbb{R}\\cup\\{\\pm\\infty\\}``| ``\\min`` | ``+`` |``\\infty`` | ``0`` |
"""
# Tropical semiring over floats: ⊕ = min, ⊗ = +, 0̄ = ∞, 1̄ = 0.
struct TropicalWeight{T <: AbstractFloat} <: Semiring
x::T
end
zero(::Type{TropicalWeight{T}}) where T = TropicalWeight{T}(T(Inf))   # 0̄ = ∞
one(::Type{TropicalWeight{T}}) where T = TropicalWeight{T}(zero(T))   # 1̄ = 0
# ⊗ is ordinary addition
*(a::TropicalWeight{T}, b::TropicalWeight{T}) where {T <: AbstractFloat} = TropicalWeight{T}(a.x + b.x)
# ⊕ is min
+(a::TropicalWeight{T}, b::TropicalWeight{T}) where {T <: AbstractFloat} = TropicalWeight{T}( min(a.x,b.x) )
# division is subtraction, the inverse of ⊗
/(a::TropicalWeight{T}, b::TropicalWeight{T}) where {T <: AbstractFloat} = TropicalWeight{T}( a.x-b.x )
# parsing
parse(::Type{S},str) where {T, S <: TropicalWeight{T}} = S(parse(T,str))
#properties
# NOTE(review): "iscommulative" is a project-wide misspelling of
# "iscommutative"; kept as-is for API compatibility
isidempotent(::Type{W}) where {W <: TropicalWeight} = true
iscommulative(::Type{W}) where {W <: TropicalWeight} = true
isleft(::Type{W}) where {W <: TropicalWeight}= true
isright(::Type{W}) where {W <: TropicalWeight}= true
isweaklydivisible(::Type{W}) where {W <: TropicalWeight}= true
iscomplete(::Type{W}) where {W <: TropicalWeight}= true
ispath(::Type{W}) where {W <: TropicalWeight}= true
| FiniteStateTransducers | https://github.com/idiap/FiniteStateTransducers.jl.git |
|
[
"MIT"
] | 0.1.0 | 42d75e0b4f7cbdc29911175ddaa61f14293c6f19 | code | 2255 | # example from http://openfst.org/twiki/bin/view/FST/ConcatDoc
sym=Dict("$(Char(i))"=>i for i=97:122)
A = WFST(sym)
add_arc!(A,1=>2,"<eps>"=>"<eps>")
add_arc!(A,2=>2,"a"=>"p", 2)
add_arc!(A,2=>3,"b"=>"q", 3)
add_arc!(A,3=>3,"c"=>"r", 4)
add_arc!(A,3=>2,"<eps>"=>"<eps>", 5)
initial!(A,1)
final!(A,1)
final!(A,3,5)
#println(A)
B = WFST(sym)
add_arc!(B,1=>1,"a"=>"p",2)
add_arc!(B,1=>2,"b"=>"q",3)
add_arc!(B,2=>2,"c"=>"r",4)
initial!(B,1)
final!(B,2,5)
#println(A)
W,D = typeofweight(A),Int
C = A*B
#println(C)
@test size(C) == (5,10)
@test get_initial(C; single=false) == Dict(1=>one(W))
@test get_final(C; single=false) == Dict(5=>W(5))
@test C[1][1] == Arc{W,D}(0,0,one(W),2)
@test C[1][2] == Arc{W,D}(0,0,one(W),4)
@test C[2][1] == Arc{W,D}(sym["a"],sym["p"],W(2),2)
@test C[2][2] == Arc{W,D}(sym["b"],sym["q"],W(3),3)
@test C[3][1] == Arc{W,D}(sym["c"],sym["r"],W(4),3)
@test C[3][2] == Arc{W,D}(0,0,W(5),2)
@test C[3][3] == Arc{W,D}(0,0,W(5),4)
@test C[4][1] == Arc{W,D}(sym["a"],sym["p"],W(2),4)
@test C[4][2] == Arc{W,D}(sym["b"],sym["q"],W(3),5)
@test C[5][1] == Arc{W,D}(sym["c"],sym["r"],W(4),5)
B = WFST(sym,["z"]); initial!(B,1); final!(B,2)
@test_throws ErrorException A*B
B = WFST(["z"],sym); initial!(B,1); final!(B,2)
@test_throws ErrorException A*B
B = WFST(sym; W = ProbabilityWeight{Float32}); initial!(B,1); final!(B,2)
@test_throws ErrorException A*B
###
### with deterministic acyclic WFSTs
W = ProbabilityWeight{Float64}
isym=Dict("$(Char(i))"=>i for i=97:122)
osym=Dict(Char(i)=>i for i=97:122)
A = WFST(isym,osym; W = W)
add_arc!(A,1=>2,"a"=>'a',1)
add_arc!(A,2=>4,"b"=>'b',2)
add_arc!(A,2=>3,"c"=>'c',3)
add_arc!(A,3=>4,"d"=>'d',4)
add_arc!(A,1=>4,"e"=>'e',5)
initial!(A,1)
final!(A,3,6)
final!(A,4,7)
println(A)
B = WFST(isym,osym; W = W)
add_arc!(B,1=>2,"x"=>'x',8)
add_arc!(B,1=>3,"y"=>'y',9)
initial!(B,1)
final!(B,2,10)
final!(B,3,11)
println(B)
pas = collectpaths(A)
pbs = collectpaths(B)
C = A*B
pcs = collectpaths(C)
@test all([pa*pb in pcs for pa in pas, pb in pbs])
C = B*A
pcs = collectpaths(C)
@test all([pb*pa in pcs for pa in pas, pb in pbs])
# self-concatenation: every pairwise concatenation of paths of the operand
# must appear in the result. The original comprehensions iterated over both
# `pas` and `pbs` but only ever tested `pa*pa` / `pb*pb`, i.e. a redundant
# 2-D loop that checked only the diagonal pairs (cf. the A*B / B*A tests above).
C = A*A
pcs = collectpaths(C)
@test all([p1*p2 in pcs for p1 in pas, p2 in pas])
C = B*B
pcs = collectpaths(C)
@test all([p1*p2 in pcs for p1 in pbs, p2 in pbs])
| FiniteStateTransducers | https://github.com/idiap/FiniteStateTransducers.jl.git |
|
[
"MIT"
] | 0.1.0 | 42d75e0b4f7cbdc29911175ddaa61f14293c6f19 | code | 986 | # Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Niccolò Antonello <[email protected]>
fst = WFST(["a","b","c"],["p","q","r"])
add_arc!(fst,1,1,"a","p",2)
add_arc!(fst,1,2,"b","q",3)
add_arc!(fst,2,2,"c","r",4)
initial!(fst,1)
final!(fst,2,5)
fst_plus = closure(fst; star=false)
println(fst_plus)
fst_star = closure(fst; star=true)
println(fst_star)
@test isinitial(fst_star,3)
@test isfinal(fst_star,3)
@test isfinal(fst_star,2)
W = typeofweight(fst)
ilabels=["a","b","c"]
o1,w1 = fst(ilabels)
@test o1 == ["p","q","r"]
@test w1 == W(2)*W(3)*W(4)*W(5)
o2,w2 = fst_star([ilabels;ilabels])
@test o2 == [o1;o1]
@test w2 == w1*w1
# evaluating the same input a second time must give identical results
o3,w3 = fst_star([ilabels;ilabels])
@test o3 == o2   # was `@test o3 == o3`, a tautology that tested nothing
@test w2 == w3
o,w = fst([ilabels;ilabels])
@test w == zero(W)
# fst star accepts empty string and returns the same with weight one
o,w = fst_star(String[])
@test o == String[]
@test w == one(W)
# fst plus does not accept empty string
o,w = fst_plus(String[])
@test w == zero(W)
| FiniteStateTransducers | https://github.com/idiap/FiniteStateTransducers.jl.git |
|
[
"MIT"
] | 0.1.0 | 42d75e0b4f7cbdc29911175ddaa61f14293c6f19 | code | 4048 | # Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Niccolò Antonello <[email protected]>
# compose
# using ex from Mohri et al. Speech recognition with FiniteStateTransducers
L = String
W = TropicalWeight{Float32}
A = WFST(["a","b","c"]; W = W)
add_arc!(A, 1, 2, "a", "b", 0.1)
add_arc!(A, 2, 2, "c", "a", 0.3)
add_arc!(A, 2, 4, "a", "a", 0.4)
add_arc!(A, 1, 3, "b", "a", 0.2)
add_arc!(A, 3, 4, "b", "b", 0.2)
final!(A, 4, 0.6)
initial!(A, 1)
#println(A)
size_A = size(A)
B = WFST(get_isym(A); W = W)
add_arc!(B, 1, 2, "b", "c", 0.3)
add_arc!(B, 2, 3, "a", "b", 0.4)
add_arc!(B, 3, 3, "a", "b", 0.6)
final!(B, 3, 0.7)
initial!(B, 1)
size_B = size(B)
#println(B)
C = ∘(A,B; filter=Trivial)
#println(C)
@test size(A) == size_A
@test size(B) == size_B
@test isinitial(C,1)
@test isfinal(C,4)
@test get_weight(C,4) ≈ W(1.3)
@test get_weight(C[1][1]) ≈ W(0.4)
@test get_weight(C[2][1]) ≈ W(0.7)
@test get_weight(C[2][2]) ≈ W(0.8)
@test get_weight(C[3][1]) ≈ W(0.9)
@test get_weight(C[3][2]) ≈ W(1.0)
@test size(C) == (4,5)
# using ex from http://openfst.org/twiki/bin/view/FST/ComposeDoc
L = String
W = TropicalWeight{Float64}
A = WFST(["a","c"],["q","r","s"]; W=W)
add_arc!(A, 1, 2, "a", "q", 1.0)
add_arc!(A, 2, 2, "c", "s", 1.0)
add_arc!(A, 1, 3, "a", "r", 2.5)
initial!(A,1)
final!(A,2,0)
final!(A,3,2.5)
#print(A)
W = TropicalWeight{Float64}
B = WFST(get_osym(A),["f","h","g","j"]; W=W)
add_arc!(B, 1, 2, "q", "f", 1.0)
add_arc!(B, 1, 3, "r", "h", 3.0)
add_arc!(B, 2, 3, "s", "g", 2.5)
add_arc!(B, 3, 3, "s", "j", 1.5)
initial!(B,1)
final!(B,3,2)
#print(B)
C = ∘(A,B,filter=Trivial)
#println(C)
@test isinitial(C,1)
@test isfinal(C,3)
@test isfinal(C,4)
@test get_weight(C,3) ≈ W(4.5)
@test get_weight(C,4) ≈ W(2.0)
@test get_weight(C[1][1]) ≈ W(2.0)
@test get_ilabel(C,1,1) == "a"
@test get_olabel(C,1,1) == "f"
@test get_nextstate(C[1][1]) == 2
@test get_weight(C[1][2]) ≈ W(5.5)
@test get_ilabel(C,1,2) == "a"
@test get_olabel(C,1,2) == "h"
@test get_nextstate(C[1][2]) == 3
@test get_weight(C[2][1]) ≈ W(3.5)
@test get_ilabel(C,2,1) == "c"
@test get_olabel(C,2,1) == "g"
@test get_nextstate(C[2][1]) == 4
@test get_weight(C[4][1]) ≈ W(2.5)
@test get_ilabel(C,4,1) == "c"
@test get_olabel(C,4,1) == "j"
@test get_nextstate(C[4][1]) == 4
@test size(C) == (4,4)
# using ex fig.8 (with different weights) from Mohri et al. Speech recognition with FiniteStateTransducers
# with epsilon symbols
A = txt2fst("openfst/A.fst", "openfst/sym.txt")
B = txt2fst("openfst/B.fst", "openfst/sym.txt")
# TODO add tests
# for the moment we test equivalence of fst with sorted weights
C = ∘(A,B;filter=Trivial)
C_openfst = txt2fst("openfst/C_trivial.fst", "openfst/sym.txt")
@test size(C) == size(C_openfst)
w = sort([get_weight(a).x for a in get_arcs(C)])
w_openfst = sort([get(get_weight(a)) for a in get_arcs(C_openfst)])
@test all(w .== w_openfst)
@test all(sort([values(C.finals)...]) .== sort([values(C_openfst.finals)...] ))
#println(C)
#println(C_openfst)
C = ∘(A,B;filter=EpsMatch)
C_openfst = txt2fst("openfst/C_match.fst", "openfst/sym.txt")
@test size(C) == size(C_openfst)
w = sort([get_weight(a).x for a in get_arcs(C)])
w_openfst = sort([get(get_weight(a)) for a in get_arcs(C_openfst)])
@test all(w .== w_openfst)
@test all(sort([values(C.finals)...]) .== sort([values(C_openfst.finals)...] ))
#println(C)
#println(C_openfst)
C = ∘(A,B;filter=EpsSeq)
C_openfst = txt2fst("openfst/C_sequence.fst", "openfst/sym.txt")
@test size(C) == size(C_openfst)
w = sort([get_weight(a).x for a in get_arcs(C)])
w_openfst = sort([get(get_weight(a)) for a in get_arcs(C_openfst)])
@test all(w .== w_openfst)
@test all(sort([values(C.finals)...]) .== sort([values(C_openfst.finals)...] ))
#println(C)
#println(C_openfst)
##
L = txt2fst("openfst/L.fst", "openfst/chars.txt", "openfst/words.txt")
#println(L)
T = txt2fst("openfst/T.fst", "openfst/words.txt", "openfst/words.txt")
#println(T)
LT = ∘(L,T;filter=EpsSeq)
#println(LT)
@test size(LT) ==(11,10)
@test_throws ErrorException T∘L # since A.osym != A.isym
| FiniteStateTransducers | https://github.com/idiap/FiniteStateTransducers.jl.git |
|
[
"MIT"
] | 0.1.0 | 42d75e0b4f7cbdc29911175ddaa61f14293c6f19 | code | 538 | # Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Niccolò Antonello <[email protected]>
sym=Dict("$(Char(i))"=>i for i=97:122)
A = WFST(sym)
W = typeofweight(A)
add_arc!(A,1,2,"a","a",1)
add_arc!(A,1,3,"a","a",2)
add_arc!(A,2,2,"b","b",3)
add_arc!(A,3,3,"b","b",3)
add_arc!(A,2,4,"c","c",5)
add_arc!(A,3,4,"d","d",6)
initial!(A,1)
final!(A,4)
println(A)
@test is_deterministic(A) == false
@test is_acceptor(A)
D = determinize_fsa(A)
#println(D)
@test is_deterministic(D)
@test size(D) == (3,4)
# TODO add more tests
| FiniteStateTransducers | https://github.com/idiap/FiniteStateTransducers.jl.git |
|
[
"MIT"
] | 0.1.0 | 42d75e0b4f7cbdc29911175ddaa61f14293c6f19 | code | 3416 | # Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Niccolò Antonello <[email protected]>
# DFS iterator test
fst = WFST(Dict(1=>1))
add_arc!(fst, 1, 2, 1, 1, 1.0)
add_arc!(fst, 1, 5, 1, 1, 1.0)
add_arc!(fst, 1, 3, 1, 1, 1.0)
add_arc!(fst, 2, 6, 1, 1, 1.0)
add_arc!(fst, 2, 4, 1, 1, 1.0)
add_arc!(fst, 5, 4, 1, 1, 1.0)
add_arc!(fst, 3, 4, 1, 1, 1.0)
add_arc!(fst, 3, 7, 1, 1, 1.0)
initial!(fst,1)
println(fst)
println("DFS un-folded")
println("Starting from state 1")
dfs = FiniteStateTransducers.DFS(fst,1)
dfs_it = Int[]
completed_seq = Int[]
for (p,s,n,d,e,a) in dfs
println("$p,$s,$n")
if d == true
println("visiting first time $n (Gray)")
push!(dfs_it,n)
else
if e
println("completed state $s (Black)")
push!(completed_seq,s)
else
println("node $n already visited")
end
end
end
@test completed_seq == [6,4,2,5,7,3,1]
@test dfs_it == [2,6,4,5,3,7]
L = Int
fst = WFST(Dict(1=>1))
add_arc!(fst, 1, 2, 1, 1, 1.0)
add_arc!(fst, 2, 3, 1, 1, 1.0)
add_arc!(fst, 3, 1, 1, 1, 1.0)
add_arc!(fst, 3, 4, 1, 1, 1.0)
add_arc!(fst, 4, 5, 1, 1, 1.0)
add_arc!(fst, 5, 6, 1, 1, 1.0)
add_arc!(fst, 6, 7, 1, 1, 1.0)
add_arc!(fst, 7, 8, 1, 1, 1.0)
add_arc!(fst, 8, 5, 1, 1, 1.0)
add_arc!(fst, 2, 9, 1, 1, 1.0)
initial!(fst,1)
final!(fst,8)
println(fst)
println(fst)
scc, c, v = get_scc(fst)
@test scc == [[1, 2, 3], [9], [4], [5, 6, 7, 8]]
# is_visited test
L = Int
ϵ = get_eps(L)
fst = WFST(Dict(i=>i for i=1:4))
add_arc!(fst, 1 => 2, 1=>1,rand() )
add_arc!(fst, 1 => 3, 1=>1, rand())
add_arc!(fst, 2 => 4, 1=>ϵ, rand())
add_arc!(fst, 2 => 3, ϵ=>ϵ, rand())
add_arc!(fst, 2 => 3, 2=>2, rand())
add_arc!(fst, 3 => 4, 3=>3, rand())
add_arc!(fst, 3 => 3, 4=>4, rand())
#print(fst)
@test is_acceptor(fst) == false
## test rm_state!
@test count_eps(get_ilabel,fst) == 1
@test count_eps(get_olabel,fst) == 2
rm_state!(fst,2)
#print(fst)
@test is_acceptor(fst) == true
@test count_eps(get_ilabel,fst) == 0
@test count_eps(get_olabel,fst) == 0
@test size(fst) == (3,3)
# test DFS against recursive
I = O = Int32
fst = WFST(Dict(i=>i for i =1:5),Dict(100*i=>i for i =1:5))
w = randn(5)
add_arc!(fst, 1, 1, 1, 100*1, w[1])
add_arc!(fst, 1, 2, 1, 100*1, w[1])
add_arc!(fst, 1, 3, 2, 100*2, w[2])
add_arc!(fst, 1, 4, 3, 100*3, w[3])
add_arc!(fst, 2, 4, 4, 100*4, w[4])
add_arc!(fst, 3, 4, 5, 100*5, w[5])
initial!(fst, 1)
final!(fst, 4)
# fst have all nodes connected
v = FiniteStateTransducers.recursive_dfs(fst)
@test all(v)
fst2 = deepcopy(fst)
add_arc!(fst2, 8, 10, 5, 100*5, w[5])
add_arc!(fst2, 8, 9, 5, 100*5, w[5])
add_arc!(fst2, 8, 8, 5, 100*5, w[5])
# fst2 5 < state <= 10 are not connected with previous ones
add_arc!(fst2, 3, 11, 5, 100*5, w[5])
add_arc!(fst2, 11, 12, 5, 100*5, w[5])
add_arc!(fst2, 12, 12, 5, 100*5, w[5])
final!(fst2,12,w[1])
# fst2 state >= 11 are connected with first nodes
v = FiniteStateTransducers.recursive_dfs(fst2)
@test all(v[1:4])
@test all(.!v[5:10])
@test all(v[11:end])
# testing lazy version
v = is_visited(fst)
@test all(v)
v = is_visited(fst2)
@test all(v[1:4])
@test all(.!v[5:10])
@test all(v[11:end])
v = is_visited(fst2,8)
@test all(v[8:10])
@test all(.!v[1:7])
@test all(.!v[11:end])
# connect tests
#println(fst2)
## testing connect
fst3 = connect(fst2)
@test all(is_visited(fst3))
@test size(fst3) == (6,9)
@test isfinal(fst3,6)
@test isfinal(fst3,4)
@test isinitial(fst3,1)
@test get(get_weight(fst3,6)) ≈ w[1]
| FiniteStateTransducers | https://github.com/idiap/FiniteStateTransducers.jl.git |
|
[
"MIT"
] | 0.1.0 | 42d75e0b4f7cbdc29911175ddaa61f14293c6f19 | code | 928 | # Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Niccolò Antonello <[email protected]>
# inverse
ilab = 1:10
olab = [randstring(1) for i =1:10]
fst = WFST(ilab,olab)
add_arc!(fst, 1, 1, ilab[1] , olab[1] , rand())
add_arc!(fst, 1, 2, ilab[2] , olab[2] , rand())
add_arc!(fst, 2, 3, ilab[3] , olab[3] , rand())
add_arc!(fst, 2, 2, ilab[4] , olab[4] , rand())
add_arc!(fst, 3, 3, ilab[5] , olab[5] , rand())
add_arc!(fst, 3, 4, ilab[6] , olab[6] , rand())
add_arc!(fst, 4, 4, ilab[7] , olab[7] , rand())
add_arc!(fst, 4, 5, ilab[8] , olab[8] , rand())
add_arc!(fst, 5, 5, ilab[9] , olab[9] , rand())
add_arc!(fst, 5, 6, ilab[10], olab[10], rand())
initial!(fst,1)
final!(fst,6,rand())
ifst = inv(fst)
@test all([a in ilab for a in get_oalphabet(ifst)])
@test all([a in olab for a in get_ialphabet(ifst)])
@test isfinal(ifst,6)
@test isinitial(ifst,1)
@test get_weight(fst,6) == get_weight(ifst,6)
| FiniteStateTransducers | https://github.com/idiap/FiniteStateTransducers.jl.git |
|
[
"MIT"
] | 0.1.0 | 42d75e0b4f7cbdc29911175ddaa61f14293c6f19 | code | 668 | # Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Niccolò Antonello <[email protected]>
isym=txt2sym("openfst/sym.txt")
S = String
fst = txt2fst("openfst/A.fst", isym)
#println(fst)
@test size(fst) == (5,4)
fst = txt2fst("openfst/B.fst", "openfst/sym.txt")
#println(fst)
@test size(fst) == (4,3)
fst = txt2fst("openfst/C_trivial.fst", "openfst/sym.txt")
#println(fst)
@test size(fst) == (8,11)
fst = txt2fst("openfst/L.fst", "openfst/chars.txt", "openfst/words.txt")
@test size(fst) == (14,18)
#println(fst)
## TODO add many more tests
dot = fst2dot(fst)
# fst2pdf needs Graphviz's `dot` binary to be installed.
# Sys.which is portable and never throws, whereas shelling out to
# `which dot` raised an error on systems without a `which` binary
# (e.g. Windows) instead of skipping the check.
if Sys.which("dot") !== nothing
fst2pdf(fst,"fst.pdf")
end
| FiniteStateTransducers | https://github.com/idiap/FiniteStateTransducers.jl.git |
|
[
"MIT"
] | 0.1.0 | 42d75e0b4f7cbdc29911175ddaa61f14293c6f19 | code | 2050 | # Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Niccolò Antonello <[email protected]>
# Path constructor
isym = ["a","b","c","d"];
osym = ["α","β","γ","δ"];
W = ProbabilityWeight{Float32};
p = Path(isym,osym,["a","b","c"] => ["α","β","γ"], one(W))
@test get_weight(p) == one(W)
FiniteStateTransducers.update_path!(p, 4, 4, 0.5)
@test get_weight(p) == W(0.5)
println(p)
p = Path(isym,["a","b","c"] => ["c","b","a"])
println(p)
@test typeofweight(p) == TropicalWeight{Float32}
@test get_weight(p) == one(TropicalWeight{Float32})
isym = ["a","b","c","d","e"];
osym = ["α","β","γ","δ","ε"];
W = ProbabilityWeight{Float32}
fst = WFST(isym,osym; W=W)
w = randn(6)
add_arc!(fst, 1, 2, "a", "α", w[1])
add_arc!(fst, 1, 3, "b", "β", w[2])
add_arc!(fst, 1, 4, "c", "γ", w[3])
add_arc!(fst, 2, 4, "d", "δ", w[4])
add_arc!(fst, 3, 4, "e", "ε", w[5])
initial!(fst, 1)
wf = rand()
final!(fst, 4, wf)
paths = collectpaths(fst)
for p in get_paths(fst)
@test p in paths
end
@test length(paths) == 3
@test all( sort(get.(get_weight.(paths))) .≈ sort([w[1]*w[4]*wf, w[2]*w[5]*wf, w[3]*wf]) )
@test get_isequence(paths[1]) == ["c"]
@test get_isequence(paths[2]) == ["a","d"]
@test get_isequence(paths[3]) == ["b","e"]
@test get_osequence(paths[1]) == ["γ"]
@test get_osequence(paths[2]) == ["α","δ"]
@test get_osequence(paths[3]) == ["β","ε"]
# empty fst
fst = WFST(Dict(1=>1); W = W)
@test_throws ErrorException collectpaths(fst)
@test_throws ErrorException get_paths(fst)
# with eps
I = O = Int32
isym = Dict(i=>i for i=1:5)
osym = Dict(100*i=>i for i=1:5)
fst = WFST(isym,osym; W = ProbabilityWeight{Float32})
w = randn(6)
ϵ = get_eps(I)
add_arc!(fst, 1, 2, 1, 100*1, w[1])
add_arc!(fst, 1, 3, 2, ϵ, w[2])
add_arc!(fst, 1, 4, ϵ, ϵ, w[3])
add_arc!(fst, 2, 4, 4, 100*4, w[4])
add_arc!(fst, 3, 4, 5, 100*5, w[5])
initial!(fst, 1)
wf = rand()
final!(fst, 4, wf)
paths = collectpaths(fst)
for p in get_paths(fst)
@test p in paths
end
@test all( sort(get.(get_weight.(paths))) .≈ sort([w[1]*w[4]*wf, w[2]*w[5]*wf, w[3]*wf]) )
| FiniteStateTransducers | https://github.com/idiap/FiniteStateTransducers.jl.git |
|
[
"MIT"
] | 0.1.0 | 42d75e0b4f7cbdc29911175ddaa61f14293c6f19 | code | 1210 | # Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Niccolò Antonello <[email protected]>
#proj
ilab = 1:10
olab = [randstring(1) for i =1:10]
fst = WFST(ilab,olab)
add_arc!(fst, 1, 1, ilab[1] , olab[1] , rand())
add_arc!(fst, 1, 2, ilab[2] , olab[2] , rand())
add_arc!(fst, 2, 3, ilab[3] , olab[3] , rand())
add_arc!(fst, 2, 2, ilab[4] , olab[4] , rand())
add_arc!(fst, 3, 3, ilab[5] , olab[5] , rand())
add_arc!(fst, 3, 4, ilab[6] , olab[6] , rand())
add_arc!(fst, 4, 4, ilab[7] , olab[7] , rand())
add_arc!(fst, 4, 5, ilab[8] , olab[8] , rand())
add_arc!(fst, 5, 5, ilab[9] , olab[9] , rand())
add_arc!(fst, 5, 6, ilab[10], olab[10], rand())
initial!(fst,1)
final!(fst,6,rand())
pfst = proj(get_ilabel,fst)
@test isfinal(pfst,6)
@test isinitial(pfst,1)
@test is_acceptor(pfst)
@test get_weight(fst,6) == get_weight(pfst,6)
@test all([a in ilab for a in get_oalphabet(pfst)])
@test all([a in ilab for a in get_ialphabet(pfst)])
pfst = proj(get_olabel,fst)
@test isfinal(pfst,6)
@test isinitial(pfst,1)
@test is_acceptor(pfst)
@test get_weight(fst,6) == get_weight(pfst,6)
@test all([a in olab for a in get_oalphabet(pfst)])
@test all([a in olab for a in get_ialphabet(pfst)])
| FiniteStateTransducers | https://github.com/idiap/FiniteStateTransducers.jl.git |
|
[
"MIT"
] | 0.1.0 | 42d75e0b4f7cbdc29911175ddaa61f14293c6f19 | code | 2736 | # Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Niccolò Antonello <[email protected]>
# get_ialphabet, get_oalphabet
ilabs = randn(10)
olabs = [randstring(1) for i =1:10]
isym = Dict( s=>i for (i,s) in enumerate(ilabs))
osym = Dict( s=>i for (i,s) in enumerate(olabs))
W = TropicalWeight{Float32}
fst = WFST(isym,osym)
@test typeofweight(fst) == W
add_arc!(fst, 1, 1, ilabs[1] , olabs[1] , rand())
add_arc!(fst, 1, 2, ilabs[2] , olabs[2] , rand())
add_arc!(fst, 2, 3, ilabs[3] , olabs[3] , rand())
add_arc!(fst, 2, 2, ilabs[4] , olabs[4] , rand())
add_arc!(fst, 3, 3, ilabs[5] , olabs[5] , rand())
add_arc!(fst, 3, 4, ilabs[6] , olabs[6] , rand())
add_arc!(fst, 4, 4, ilabs[7] , olabs[7] , rand())
add_arc!(fst, 4, 5, ilabs[8] , olabs[8] , rand())
add_arc!(fst, 5, 5, ilabs[9] , olabs[9] , rand())
add_arc!(fst, 5, 6, ilabs[10], olabs[10], rand())
add_arc!(fst, 6, 6, ilabs[10], olabs[10], rand())
initial!(fst,1)
final!(fst,6)
@test length(fst.isym) == 10
@test length(fst.osym) == 10
##println(fst)
A = get_ialphabet(fst)
B = get_oalphabet(fst)
@test eltype(A) <: AbstractFloat
@test eltype(B) <: String
@test all(i in A for i in ilabs)
@test all(o in B for o in olabs)
## count_eps/has_eps/is_acceptor
isym = Dict(i=>i for i =1:5)
fst = WFST(isym)
ϵ = get_eps(Int)
add_arc!(fst, 1, 2, 5, 1, 0.5)
add_arc!(fst, 1, 3, 1, 1, rand())
add_arc!(fst, 2, 4, 1, ϵ, rand())
add_arc!(fst, 2, 3, 1, ϵ, rand())
add_arc!(fst, 2, 3, 2, 2, rand())
add_arc!(fst, 3, 4, 3, 3, rand())
add_arc!(fst, 3, 3, 4, 4, rand())
@test count_eps(get_ilabel,fst) == 0
@test count_eps(get_olabel,fst) == 2
@test has_eps(fst) == true
@test has_eps(get_ilabel,fst) == false
@test has_eps(get_olabel,fst) == true
@test is_acceptor(fst) == false
@test size(fst) == (4,7)
fst = WFST(isym)
add_arc!(fst, 1, 2, 1, 1, 0.5)
add_arc!(fst, 2, 3, 2, 2, 0.5)
@test is_acceptor(fst) == true
# is_deterministic
#ex form http://openfst.org/twiki/bin/view/FST/DeterminizeDoc
sym=Dict("$(Char(i))"=>i for i=97:122)
A = WFST(sym)
add_arc!(A,1,2,"a","p",1)
add_arc!(A,1,3,"a","q",2)
add_arc!(A,2,4,"c","r",4)
add_arc!(A,2,4,"c","r",5)
add_arc!(A,3,4,"d","s",6)
initial!(A,1)
final!(A,4)
#println(A)
@test is_deterministic(A) == false
B = WFST(sym)
add_arc!(B,1,2,"a","<eps>",1)
add_arc!(B,2,3,"c","p",4)
add_arc!(B,3,5,"<eps>","r",0)
add_arc!(B,2,4,"d","q",7)
add_arc!(B,4,5,"<eps>","s",0)
initial!(B,1)
final!(B,5)
#println(B)
@test is_deterministic(B) == true
# is_acyclic
A = WFST(sym)
add_arc!(A,1,2,"a","<eps>",1)
add_arc!(A,2,3,"c","p",4)
add_arc!(A,3,5,"<eps>","r",0)
add_arc!(A,2,4,"d","q",7)
add_arc!(A,4,5,"<eps>","s",0)
initial!(A,1)
final!(A,5)
#println(A)
@test is_acyclic(A)
add_arc!(A,5,1,"a","s",0)
@test !is_acyclic(A)
| FiniteStateTransducers | https://github.com/idiap/FiniteStateTransducers.jl.git |
|
[
"MIT"
] | 0.1.0 | 42d75e0b4f7cbdc29911175ddaa61f14293c6f19 | code | 1277 | # Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Niccolò Antonello <[email protected]>
fst = WFST(["a","b","c","d","e","f"])
add_arc!(fst,1=>1,"a"=>"a",2)
add_arc!(fst,1=>2,"a"=>"a",1)
add_arc!(fst,2=>3,"b"=>"b",3)
add_arc!(fst,2=>3,"c"=>"c",4)
add_arc!(fst,3=>3,"d"=>"d",5)
add_arc!(fst,3=>4,"d"=>"d",6)
add_arc!(fst,4=>4,"f"=>"f",2)
initial!(fst,1)
final!(fst,2,3)
final!(fst,4,2)
#println(fst)
sym = get_isym(fst)
W = typeofweight(fst)
D = Int
rfst = reverse(fst)
@test rfst[1][1] == Arc{W,D}(0,0,W(3),3)
@test rfst[1][2] == Arc{W,D}(0,0,W(2),5)
@test rfst[2][1] == Arc{W,D}(sym["a"],sym["a"],W(2),2)
@test rfst[3][1] == Arc{W,D}(sym["a"],sym["a"],W(1),2)
@test rfst[4][1] == Arc{W,D}(sym["b"],sym["b"],W(3),3)
@test rfst[4][2] == Arc{W,D}(sym["c"],sym["c"],W(4),3)
@test rfst[4][3] == Arc{W,D}(sym["d"],sym["d"],W(5),4)
@test rfst[5][1] == Arc{W,D}(sym["d"],sym["d"],W(6),4)
@test rfst[5][2] == Arc{W,D}(sym["f"],sym["f"],W(2),5)
@test size(rfst) == (5,9)
fst = WFST([1,2],["a","b"];W = LeftStringWeight)
add_arc!(fst,1=>2,1=>"a","ciao")
add_arc!(fst,2=>3,2=>"b","bello")
initial!(fst,1)
final!(fst,3)
rfst = reverse(fst)
#println(rfst)
seq = [1,2]
out, w = fst(seq)
outr, wr = rfst(reverse(seq))
@test out == reverse(outr)
@test w == reverse(wr)
| FiniteStateTransducers | https://github.com/idiap/FiniteStateTransducers.jl.git |
|
[
"MIT"
] | 0.1.0 | 42d75e0b4f7cbdc29911175ddaa61f14293c6f19 | code | 827 | # Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Niccolò Antonello <[email protected]>
# example from http://openfst.org/twiki/bin/view/FST/RmEpsilonDoc
W = TropicalWeight{Float64}
A = WFST(Dict("a"=>1,"p"=>2),W = W)
add_arc!(A,1,2,"<eps>","<eps>",1)
add_arc!(A,2,3,"a","<eps>",2)
add_arc!(A,2,3,"<eps>","p",3)
add_arc!(A,2,3,"<eps>","<eps>",4)
add_arc!(A,3,3,"<eps>","<eps>",5)
add_arc!(A,3,4,"<eps>","<eps>",6)
final!(A,4,7)
initial!(A,1)
# println(A)
B = rm_eps(A)
# println(B)
@test isinitial(B,1)
@test get_final_weight(B,1) == W(18)
@test get_initial_weight(B,1) == one(W)
@test isfinal(B,1)
@test isfinal(B,2)
@test get_final_weight(B,2) == W(13)
@test B[1][1] == FiniteStateTransducers.Arc{W,Int}(1,0,W(3),2)
@test B[1][2] == FiniteStateTransducers.Arc{W,Int}(0,2,W(4),2)
@test size(B) == (2,2)
| FiniteStateTransducers | https://github.com/idiap/FiniteStateTransducers.jl.git |
|
[
"MIT"
] | 0.1.0 | 42d75e0b4f7cbdc29911175ddaa61f14293c6f19 | code | 937 | # Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Niccolò Antonello <[email protected]>
using FiniteStateTransducers
using Test, Random
using DataStructures
# fixed seed so the stochastic fixtures (rand/randn/randstring) used by the
# included test files are reproducible across runs
Random.seed!(123)
@testset "FiniteStateTransducers.jl" begin
@testset "Semirings" begin
include("semirings.jl")
end
@testset "FiniteStateTransducers constructors and utils" begin
include("wfst.jl")
include("properties.jl")
include("io_wfst.jl")
end
@testset "algorithms" begin
include("paths.jl") #Paths, BFS, paths
include("dfs.jl") #rm_state, DFS, get_scc, connect, rm_eps
include("shortest_distance.jl")
include("topsort.jl")
include("rm_eps.jl")
include("closure.jl")
include("inv.jl")
include("proj.jl")
include("reverse.jl")
include("compose.jl")
include("cat.jl")
include("determinize.jl")
include("wfst2tr.jl")
end
end
#@testset "FiniteStateTransducers.jl" begin
#end
| FiniteStateTransducers | https://github.com/idiap/FiniteStateTransducers.jl.git |
|
[
"MIT"
] | 0.1.0 | 42d75e0b4f7cbdc29911175ddaa61f14293c6f19 | code | 11070 | # Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Niccolò Antonello <[email protected]>
for T in [Float64, Float32]
#################
# ProbabilityWeight semiring
#################
a = ProbabilityWeight(rand(T))
b = ProbabilityWeight(rand(T))
c = ProbabilityWeight(rand(T))
d = ProbabilityWeight{T}(a) # check ProbabilityWeight(x::ProbabilityWeight)
e = ProbabilityWeight{T}( ProbabilityWeight{T == Float64 ? Float32 : Float64}(get(a)) )
f = ProbabilityWeight{T}( rand(T == Float64 ? Float32 : Float64) )
@test typeof(e) == ProbabilityWeight{T}
@test typeof(f) == ProbabilityWeight{T}
@test (a*b).x == a.x*b.x
@test (a + b).x == a.x + b.x
@test (a/b).x == a.x/b.x
@test reverse(a) == a
@test FiniteStateTransducers.reverseback(a) == a
o = one(ProbabilityWeight{T})
@test o.x == one(T)
z = zero(ProbabilityWeight{T})
@test z.x == zero(T)
# + commutative monoid
@test (a+b)+c ≈ a+(b+c)
@test a+z ≈ z+a ≈ a
@test a+b ≈ b+a
# * monoid
@test (a*b)*c ≈ a*(b*c)
@test a*o == o*a == a # identity
@test a*z == z*a == z # annihilator
# distribution over addition
@test a*(b+c) ≈ (a*b)+(a*c)
@test (a+b)*c ≈ (a*c)+(b*c)
# division
c = a*b
@test a*(c/a) ≈ c
@test (c/b)*b ≈ c
# properties
W = ProbabilityWeight{T}
@test FiniteStateTransducers.isidempotent(W) == false
@test FiniteStateTransducers.iscommulative(W)
@test FiniteStateTransducers.isleft(W)
@test FiniteStateTransducers.isright(W)
@test FiniteStateTransducers.isweaklydivisible(W)
@test FiniteStateTransducers.iscomplete(W)
@test FiniteStateTransducers.ispath(W) == false
@test FiniteStateTransducers.reversetype(W) == W
a = parse(W,"1.0")
@test a.x == T(1.0)
#################
# LogWeight semiring
#################
a = LogWeight(1)
b = LogWeight(2)
@test (a + b).x ≈ log(exp(a.x)+exp(b.x))
a = LogWeight(2)
b = LogWeight(1)
@test (a + b).x ≈ log(exp(a.x)+exp(b.x))
a = LogWeight(rand(T))
b = LogWeight(rand(T))
c = LogWeight(rand(T))
d = LogWeight{T}(a) # check ProbabilityWeight(x::ProbabilityWeight)
e = LogWeight{T}( LogWeight{T == Float64 ? Float32 : Float64}(get(a)) )
@test typeof(e) == LogWeight{T}
@test (a*b).x ≈ a.x + b.x
@test (a + b).x ≈ log(exp(a.x)+exp(b.x))
@test (a / b).x ≈ log(exp(a.x)/exp(b.x))
@test reverse(a) == a
@test FiniteStateTransducers.reverseback(a) == a
o = one(LogWeight{T})
@test o.x == 0
z = zero(LogWeight{T})
@test z.x == T(log(0))
# + commutative monoid
@test (a+b)+c ≈ a+(b+c)
@test a+z ≈ z+a ≈ a
@test a+b ≈ b+a
# * monoid
@test (a*b)*c ≈ a*(b*c)
@test a*o == o*a == a # identity
@test a*z == z*a == z # annihilator
# distribution over addition
@test a*(b+c) ≈ (a*b)+(a*c)
@test (a+b)*c ≈ (a*c)+(b*c)
# division
c = a*b
@test a*(c/a) ≈ c
@test (c/b)*b ≈ c
# properties
W = LogWeight{T}
@test FiniteStateTransducers.isidempotent(W) == false
@test FiniteStateTransducers.iscommulative(W)
@test FiniteStateTransducers.isleft(W)
@test FiniteStateTransducers.isright(W)
@test FiniteStateTransducers.isweaklydivisible(W)
@test FiniteStateTransducers.iscomplete(W)
@test FiniteStateTransducers.ispath(W) == false
@test FiniteStateTransducers.reversetype(W) == W
# parse
a = parse(W,"1.0")
@test a.x == T(1.0)
#################
# NLogWeight semiring
#################
a = NLogWeight(1)
b = NLogWeight(2)
@test (a + b).x ≈ -log(exp(-a.x)+exp(-b.x))
a = NLogWeight(2)
b = NLogWeight(1)
@test (a + b).x ≈ -log(exp(-a.x)+exp(-b.x))
a = NLogWeight(rand(T))
b = NLogWeight(rand(T))
c = NLogWeight(rand(T))
d = NLogWeight{T}(a) # check ProbabilityWeight(x::ProbabilityWeight)
e = NLogWeight{T}( NLogWeight{T == Float64 ? Float32 : Float64}(get(a)) )
@test typeof(e) == NLogWeight{T}
@test (a*b).x ≈ a.x + b.x
@test (a + b).x ≈ -log(exp(-a.x)+exp(-b.x))
@test (a / b).x ≈ log(exp(a.x)/exp(b.x))
@test reverse(a) == a
@test FiniteStateTransducers.reverseback(a) == a
o = one(NLogWeight{T})
@test o.x == 0
z = zero(NLogWeight{T})
@test z.x == T(-log(0))
# + commutative monoid
@test (a+b)+c ≈ a+(b+c)
@test a+z ≈ z+a ≈ a
@test a+b ≈ b+a
# * monoid
@test (a*b)*c ≈ a*(b*c)
@test a*o == o*a == a # identity
@test a*z == z*a == z # annihilator
# distribution over addition
@test a*(b+c) ≈ (a*b)+(a*c)
@test (a+b)*c ≈ (a*c)+(b*c)
# division
c = a*b
@test a*(c/a) ≈ c
@test (c/b)*b ≈ c
# properties
W = NLogWeight{T}
@test FiniteStateTransducers.isidempotent(W) == false
@test FiniteStateTransducers.iscommulative(W)
@test FiniteStateTransducers.isleft(W)
@test FiniteStateTransducers.isright(W)
@test FiniteStateTransducers.isweaklydivisible(W)
@test FiniteStateTransducers.iscomplete(W)
@test FiniteStateTransducers.ispath(W) == false
@test FiniteStateTransducers.reversetype(W) == W
# parse
a = parse(W,"1.0")
@test a.x == T(1.0)
#################
# TropicalWeight semiring
#################
a = TropicalWeight(rand(T))
b = TropicalWeight(rand(T))
c = TropicalWeight(rand(T))
d = TropicalWeight{T}(a) # check ProbabilityWeight(x::ProbabilityWeight)
e = TropicalWeight{T}( TropicalWeight{T == Float64 ? Float32 : Float64}(get(a)) )
@test typeof(e) == TropicalWeight{T}
@test (a*b).x == a.x + b.x
@test (a + b).x == min(a.x,b.x)
@test (a / b).x ≈ log(exp(a.x)/exp(b.x))
o = one(TropicalWeight{T})
@test o.x == 0
z = zero(TropicalWeight{T})
@test z.x == T(Inf)
# + commutative monoid
@test (a+b)+c ≈ a+(b+c)
@test a+z ≈ z+a ≈ a
@test a+b ≈ b+a
# * monoid
@test (a*b)*c ≈ a*(b*c)
@test a*o == o*a == a # identity
@test a*z == z*a == z # annihilator
# distribution over addition
@test a*(b+c) ≈ (a*b)+(a*c)
@test (a+b)*c ≈ (a*c)+(b*c)
# division
c = a*b
@test a*(c/a) ≈ c
@test (c/b)*b ≈ c
W = TropicalWeight{T}
@test FiniteStateTransducers.isidempotent(W)
@test FiniteStateTransducers.iscommulative(W)
@test FiniteStateTransducers.isleft(W)
@test FiniteStateTransducers.isright(W)
@test FiniteStateTransducers.isweaklydivisible(W)
@test FiniteStateTransducers.iscomplete(W)
@test FiniteStateTransducers.ispath(W)
@test FiniteStateTransducers.reversetype(W) == W
# parse
a = parse(W,"1.0")
@test a.x == T(1.0)
end
# Boolean semiring: ⊕ is logical `or`, ⊗ is logical `and` (see the tests below).
a = BoolWeight(rand([true;false]))
b = BoolWeight(rand([true;false]))
c = BoolWeight(rand([true;false]))
d = BoolWeight(a) # check the BoolWeight(x::BoolWeight) copy constructor
@test (a*b).x == ( a.x && b.x)
@test (a + b).x == ( a.x || b.x )
o = one(BoolWeight)
@test o.x == true
z = zero(BoolWeight)
@test z.x == false
# division is not defined for the boolean semiring
@test_throws MethodError a/b
# + commutative monoid
@test (a+b)+c ≈ a+(b+c)
@test a+z ≈ z+a ≈ a
@test a+b ≈ b+a
# * monoid
@test (a*b)*c ≈ a*(b*c)
@test a*o == o*a == a # identity
@test a*z == z*a == z # annihilator
# distribution over addition
@test a*(b+c) ≈ (a*b)+(a*c)
@test (a+b)*c ≈ (a*c)+(b*c)
# semiring properties
W = BoolWeight
@test FiniteStateTransducers.isidempotent(W)
@test FiniteStateTransducers.iscommulative(W)
@test FiniteStateTransducers.isleft(W)
@test FiniteStateTransducers.isright(W)
@test FiniteStateTransducers.isweaklydivisible(W) == false
@test FiniteStateTransducers.ispath(W)
@test FiniteStateTransducers.reversetype(W) == W
# parsing from a string representation
a = parse(W,"true")
@test a.x == true
###########################
###########################
###########################
# Left string semiring: ⊕ is the longest common prefix, ⊗ is concatenation.
a = LeftStringWeight("ciao")
b = LeftStringWeight("caro")
c = LeftStringWeight("mona")
d = LeftStringWeight(a) # check the LeftStringWeight(x::LeftStringWeight) copy constructor
@test (a*b).x == "ciaocaro"
@test (a*c).x == "ciaomona"
@test (a + b).x == "c"
@test (a + c).x == ""
# one is the empty string with iszero == false; zero is flagged by iszero
o = one(LeftStringWeight)
@test o.x == ""
@test o.iszero == false
z = zero(LeftStringWeight)
@test z.x == ""
@test z.iszero
# + commutative monoid
@test (a+b)+c == a+(b+c)
@test a+z == z+a == a
@test a+b == b+a
# * monoid (concatenation, not commutative)
@test (a*b)*c == a*(b*c)
@test a*o == o*a == a # identity
@test a*z == z*a == z # annihilator
# distribution over addition (left distributivity)
@test a*(b+c) == (a*b)+(a*c)
# division: defined on the left only, hence the second check fails
c = a*b
@test a*(c/a) == c
@test (c/b)*b != c
# semiring properties
W = LeftStringWeight
@test FiniteStateTransducers.isidempotent(W)
@test FiniteStateTransducers.iscommulative(W) == false
@test FiniteStateTransducers.isleft(W)
@test FiniteStateTransducers.isright(W) == false
@test FiniteStateTransducers.isweaklydivisible(W)
@test FiniteStateTransducers.ispath(W) == false
@test FiniteStateTransducers.reversetype(W) == RightStringWeight
###########################
###########################
###########################
# Right string semiring: ⊕ is the longest common suffix, ⊗ is concatenation.
a = RightStringWeight("ciao")
b = RightStringWeight("caro")
c = RightStringWeight("mona")
d = RightStringWeight(a) # check the RightStringWeight(x::RightStringWeight) copy constructor
@test (a*b).x == "ciaocaro"
@test (a*c).x == "ciaomona"
@test (a + b).x == "o"
@test (a + c).x == ""
# one is the empty string with iszero == false; zero is flagged by iszero
o = one(RightStringWeight)
@test o.x == ""
@test o.iszero == false
z = zero(RightStringWeight)
@test z.x == ""
@test z.iszero
# + commutative monoid
@test (a+b)+c == a+(b+c)
@test a+z == z+a == a
@test a+b == b+a
# * monoid (concatenation, not commutative)
@test (a*b)*c == a*(b*c)
@test a*o == o*a == a # identity
@test a*z == z*a == z # annihilator
# distribution over addition (right distributivity)
@test (a+b)*c == (a*c)+(b*c)
# division: defined on the right only, hence the first check fails
c = a*b
@test a*(c/a) != c
@test (c/b)*b == c
# semiring properties
W = RightStringWeight
@test FiniteStateTransducers.isidempotent(W)
@test FiniteStateTransducers.iscommulative(W) == false
@test FiniteStateTransducers.isleft(W) == false
@test FiniteStateTransducers.isright(W)
@test FiniteStateTransducers.isweaklydivisible(W)
@test FiniteStateTransducers.ispath(W) == false
@test FiniteStateTransducers.reversetype(W) == LeftStringWeight
###########################
###########################
###########################
# Product semiring: componentwise operations on a (LeftStringWeight, TropicalWeight) pair.
W1, W2 = LeftStringWeight, TropicalWeight{Float64}
a = ProductWeight(W1("ciao"),W2(1.0))
b = ProductWeight(W1("caro"),W2(2.0))
c = ProductWeight(W1("mona"),W2(3.0))
d = ProductWeight(a) # check the ProductWeight(x::ProductWeight) copy constructor
dd = typeof(d)(a)
@test (a*b) == ProductWeight(W1("ciaocaro"),W2(3.0))
@test (a*c) == ProductWeight(W1("ciaomona"),W2(4.0))
@test (a+b) == ProductWeight(W1("c"),W2(1.0))
@test (a+c) == ProductWeight(W1(""),W2(1.0))
W = typeof(a)
# one/zero are built componentwise from the component semirings
o = one(W)
@test get(o)[1] == one(W1)
@test get(o)[2] == one(W2)
z = zero(typeof(a))
@test get(z)[1] == zero(W1)
@test get(z)[2] == zero(W2)
# + commutative monoid
@test (a+b)+c == a+(b+c)
@test a+z == z+a == a
@test a+b == b+a
# * monoid
@test (a*b)*c == a*(b*c)
@test a*o == o*a == a # identity
@test a*z == z*a == z # annihilator
# distribution over addition
@test a*(b+c) == (a*b)+(a*c)
# division
c = a*b
@test a*(c/a) == c
# semiring properties
@test FiniteStateTransducers.isidempotent(W)
@test FiniteStateTransducers.iscommulative(W) == false
@test FiniteStateTransducers.isleft(W)
@test FiniteStateTransducers.isright(W) == false
@test FiniteStateTransducers.isweaklydivisible(W)
@test FiniteStateTransducers.ispath(W) == false
# reversing a product weight reverses each component type
@test FiniteStateTransducers.reversetype(W) == ProductWeight{2,Tuple{RightStringWeight,TropicalWeight{Float64}}}
| FiniteStateTransducers | https://github.com/idiap/FiniteStateTransducers.jl.git |
|
[
"MIT"
] | 0.1.0 | 42d75e0b4f7cbdc29911175ddaa61f14293c6f19 | code | 497 | # Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Niccolò Antonello <[email protected]>
# Shortest-distance tests on a small WFST; the expected Float32 values follow
# min-plus (tropical) arithmetic over the arc weights below.
fst = WFST(["a","b","c","d","f"])
add_arc!(fst,1,2,"a","a",3)
add_arc!(fst,2,2,"b","b",2)
add_arc!(fst,2,4,"c","c",4)
add_arc!(fst,1,3,"d","d",5)
add_arc!(fst,3,4,"f","f",4)
initial!(fst,1)
final!(fst,4,3)
#println(fst)
# forward distances from the initial state: e.g. state 4 is min(3+4, 5+4) = 7
d = shortest_distance(fst)
@test all(get.(d) .== Float32[0.0;3.0;5.0;7.0])
# reversed distances to the final state include the final weight 3
d = shortest_distance(fst;reversed=true)
@test all(get.(d) .== Float32[10.0;7.0;7.0;3.0])
| FiniteStateTransducers | https://github.com/idiap/FiniteStateTransducers.jl.git |
|
[
"MIT"
] | 0.1.0 | 42d75e0b4f7cbdc29911175ddaa61f14293c6f19 | code | 811 | # Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Niccolò Antonello <[email protected]>
# Topological-sort tests: build a small acyclic WFST over the alphabet a-z,
# check topsortperm/topsort, then verify that adding a cycle makes topsort throw.
W = TropicalWeight{Float64}
sym=Dict("$(Char(i))"=>i for i=97:122)
fst = WFST(sym; W = W)
add_arc!(fst,1,3,"<eps>","<eps>",one(W))
add_arc!(fst,3,6,"f","f",one(W))
add_arc!(fst,6,4,"d","d",one(W))
add_arc!(fst,1,4,"a","a",one(W))
add_arc!(fst,4,5,"c","c",one(W))
add_arc!(fst,5,7,"b","b",one(W))
add_arc!(fst,7,2,"a","a",one(W))
add_arc!(fst,5,2,"b","b",one(W))
initial!(fst,1)
final!(fst,2)
#println(fst) # silenced stray debug print (kept commented out, as in the other test files)
perm = topsortperm(fst)
@test perm == [1,3,6,4,5,7,2]
sorted_fst = topsort(fst)
@test size(sorted_fst) == size(fst)
# after sorting, the permutation is the identity and the final state comes last
@test topsortperm(sorted_fst) == [1,2,3,4,5,6,7]
@test isfinal(sorted_fst,7)
# fails if fst has cycles
add_arc!(fst,2,1,"z","z",one(W))
@test_throws ErrorException topsort(fst)
| FiniteStateTransducers | https://github.com/idiap/FiniteStateTransducers.jl.git |
|
[
"MIT"
] | 0.1.0 | 42d75e0b4f7cbdc29911175ddaa61f14293c6f19 | code | 4255 | # Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Niccolò Antonello <[email protected]>
# weighted arc definition
W = ProbabilityWeight{Float32}
a1 = FiniteStateTransducers.Arc(1,1,W(0.5),2)
a2 = FiniteStateTransducers.Arc(1,1,W(0.5),2)
a3 = FiniteStateTransducers.Arc(1,1,W(0.8),2)
@test a1 == a2
@test (a1 == a3) == false
# arcs that differ only in the weight compare equal when weights are ignored
@test ==(a1,a3;check_weight=false) == true
# WFST constructor
isym = Dict(i=>i for i =1:5)
fst = WFST(isym; W = W)
add_states!(fst,4) # create 4 states
@test length(fst) == 4
initial!(fst,1)
@test 1 in get_initial(fst)
@test isinitial(fst,1) == true
final!(fst,4,0.5)
final!(fst,3)
@test 4 in keys(get_final(fst))
@test isfinal(fst,3) == true
initial!(fst,1,W(0.2))
### add arcs
ϵ = get_eps(Int)
add_arc!(fst, 1=>2, 5=>1)
add_arc!(fst, 1, 3, 1, 1, rand())
add_arc!(fst, 2, 4, 1, ϵ, rand())
add_arc!(fst, 2, 3, 1, ϵ, rand())
add_arc!(fst, 2, 3, 2, 2, rand())
add_arc!(fst, 3, 4, 3, 3, rand())
add_arc!(fst, 3, 3, 4, 4, rand())
# label 6 is not in the symbol table, so adding the arc must fail
@test_throws ErrorException add_arc!(fst, 3, 3, 6, 6, rand())
#println(fst) # silenced stray debug print (kept commented out, as in the other test files)
initial!(fst,2)
rm_initial!(fst,1)
@test get_initial(fst;single=true) == 2
rm_initial!(fst,2)
# accessing the initial states of a WFST without any must throw
@test_throws ErrorException get_initial(fst)
rm_final!(fst,4)
#println(fst) # silenced stray debug print
rm_final!(fst,3)
# accessing the final states of a WFST without any must throw
@test_throws ErrorException get_final(fst)
## testing linear wfst
# A linear WFST is a single chain: 4 arcs require 5 states.
fst = linearfst([1,1,3,4],[1,2,3,4],rand(4),isym)
@test size(fst) == (5,4)
@test isinitial(fst,1)
@test isfinal(fst,5)
### matrix2fsa
# Build a WFST from an Ns×Nt score matrix: one arc per (symbol, time) entry,
# giving Nt+1 states and Ns*Nt arcs.
Ns,Nt=3,10
sym=Dict("a"=>1,"b"=>2,"c"=>3)
X = rand(Ns,Nt)
fst = matrix2wfst(sym,X)
#println(fst)
@test size(fst) == (Nt+1,Ns*Nt)
@test isinitial(fst,1)
@test isfinal(fst,Nt+1)
X[2,2] = Inf # this is a tropical zero so no arc should be added
fst = matrix2wfst(sym,X; W = TropicalWeight{Float32})
@test size(fst) == (Nt+1,Ns*Nt-1)
@test isinitial(fst,1)
@test isfinal(fst,Nt+1)
# transduce symbol seq
isym=Dict(Char(i)=>i for i=97:122)
osym=Dict(Char(i)=>i for i=97:122)
W = ProbabilityWeight{Float64}
# A maps "hello" to "world"; the extra arcs exercise ϵ-input and ϵ-output handling.
A = WFST(isym,osym; W=W);
add_arc!(A,1=>2,'h'=>'w',1)
add_arc!(A,2=>3,'e'=>'o',1)
add_arc!(A,3=>4,'l'=>'r',1)
add_arc!(A,4=>5,'l'=>'l',1)
add_arc!(A,5=>6,'o'=>'d',1)
add_arc!(A,5=>6,'ϵ'=>'d',0.001)
add_arc!(A,6=>6,'o'=>'ϵ',0.5)
initial!(A,1)
final!(A,6)
#println(A)
out,w = A(['h', 'e', 'l', 'l', 'o'])
@test out == ['w', 'o', 'r', 'l', 'd']
@test w == one(W)
# a sequence that cannot start from the initial state: empty output, zero weight
out,w = A(['e', 'l'])
@test isempty(out)
@test w == zero(W)
# a partial sequence ends in a non-final state: partial output, zero weight
out,w = A(['h'])
@test out == ['w']
@test w == zero(W)
out,w = A(['h']) # NOTE(review): exact duplicate of the previous three lines
@test out == ['w']
@test w == zero(W)
out,w = A(['h', 'e', 'l', 'l', 'o', 'o', 'o', 'o'])
@test out == ['w', 'o', 'r', 'l', 'd']
@test w == W(0.5^3) # 3 times in self loop
# an explicit ϵ in the input sequence is skipped
out,w = A(['h', 'e', 'l', 'l', 'ϵ', 'o', 'o', 'o'])
@test out == ['w', 'o', 'r', 'l', 'd']
@test w == W(0.5^2) # 2 times in self loop
# the missing 'o' is supplied by the ϵ-input arc of weight 0.001
out,w = A(['h', 'e', 'l', 'l'])
@test out == ['w', 'o', 'r', 'l', 'd']
@test w == W(0.001)
# B exercises ϵ output labels, which are dropped from the output sequence
B = WFST(isym,osym; W=W);
add_arc!(B,1=>2,'h'=>'n',1)
add_arc!(B,2=>3,'e'=>'ϵ',1)
add_arc!(B,3=>4,'ϵ'=>'ϵ',1)
add_arc!(B,4=>5,'l'=>'ϵ',1)
add_arc!(B,5=>6,'l'=>'ϵ',1)
add_arc!(B,6=>7,'ϵ'=>'o',1)
add_arc!(B,7=>7,'l'=>'ϵ',0.5)
initial!(B,1)
final!(B,7)
out,w = B(['h', 'e', 'l', 'l'])
@test out == ['n', 'o']
@test w == one(W)
out,w = B(['h', 'e', 'l', 'l', 'l', 'l'])
@test out == ['n', 'o']
@test w == W(0.5^2)
# TODO not sure this should stay in this package
#### fsa2transition
## transition matrix 3 state HMM with self loop in the middle
#Ns,Nt = 3,5
#a = [1.0, 0.0, 0.0]
#A = [
# 0.0 1.0 0.0;
# 0.0 0.5 0.5;
# 1.0 0.0 0.0;
# ]
#
## same topo with WFST
#W = Float64
#H = WFST(W,String,String)
#
## 3 state HMM
#add_arc!(H,1,2,"a1","a",1.0)
#add_arc!(H,2,3,"a2","<eps>",0.5)
#add_arc!(H,2,2,"a2","<eps>",0.5)
#add_arc!(H,3,4,"a3","<eps>",1.0)
#add_arc!(H,3,1,"a3","<eps>",1.0)
#initial!(H,1)
#final!(H,4)
##println(H)
#
#A2 = fst2transition(H)
#A = W.(A)
#@test all( A .== Array(A2) )
#@test all( sum( A2, dims=2 ) .≈ 1.0 )
#
#Nt = 10
#time2tr = fst2transition(H,Nt)
##println(time2tr)
#
#@test length(time2tr) == Nt-1
#@test all([ all(keys(tr) .<= length(get_isym(H)) ) for tr in time2tr])
#
#H2 = deepcopy(H)
#add_arc!(H2,1,2,"<eps>","a",1.0)
#@test_throws ErrorException fst2transition(H2)
#
#H2 = deepcopy(H)
#add_arc!(H2,2,2,"a2","<eps>",0.6)
#@test_throws ErrorException fst2transition(H2)
| FiniteStateTransducers | https://github.com/idiap/FiniteStateTransducers.jl.git |
|
[
"MIT"
] | 0.1.0 | 42d75e0b4f7cbdc29911175ddaa61f14293c6f19 | code | 561 | # Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Niccolò Antonello <[email protected]>
### fsa2transition
# transition matrix 3 state HMM with self loop in the middle
H = WFST(["a1","a2","a3"],["a"]);
add_arc!(H,1,2,"a1","a");
add_arc!(H,2,3,"a2","<eps>");
add_arc!(H,2,2,"a2","<eps>");
add_arc!(H,3,4,"a3","<eps>");
add_arc!(H,3,2,"a3","a");
initial!(H,1);
final!(H,4)
#println(H)
# wfst2tr returns one label-transition table per time-step boundary (Nt-1 of them)
Nt = 10;
time2tr = wfst2tr(H,Nt)
#println(time2tr)
@test length(time2tr) == Nt-1
# every key must be a valid input-symbol index
@test all([ all(keys(tr) .<= length(get_isym(H)) ) for tr in time2tr])
| FiniteStateTransducers | https://github.com/idiap/FiniteStateTransducers.jl.git |
|
[
"MIT"
] | 0.1.0 | 42d75e0b4f7cbdc29911175ddaa61f14293c6f19 | docs | 997 | # FiniteStateTransducers.jl
[](https://idiap.github.io/FiniteStateTransducers.jl/stable/)
[](https://idiap.github.io/FiniteStateTransducers.jl/dev/)
[](https://codecov.io/gh/idiap/FiniteStateTransducers.jl)
Play with Weighted Finite State Transducers (WFSTs) using the Julia language.
WFSTs provide a powerful framework that assigns a weight (e.g. probability) to conversions of symbol sequences.
WFSTs are used in many applications such as speech recognition, natural language processing and machine learning.
This package takes a lot of inspiration from [OpenFST](http://openfst.org/twiki/bin/view/FST/DeterminizeDoc).
FiniteStateTransducers is still in an early development stage, see the documentation for currently available features and the issues for the missing ones.
| FiniteStateTransducers | https://github.com/idiap/FiniteStateTransducers.jl.git |
|
[
"MIT"
] | 0.1.0 | 42d75e0b4f7cbdc29911175ddaa61f14293c6f19 | docs | 2743 | # [Introduction](@id intro)
## Weighted Finite State Transducers
Weighted finite state transducers (WFSTs) are graphs capable of translating an input sequence of symbols to an output sequence of symbols and associate a particular weight to this conversion.
Firstly we define the input and output symbols:
```julia
julia> isym = [s for s in "hello"];
julia> osym = [s for s in "world"];
```
We can construct a WFST by adding arcs, where each arc has an input label, an output label and a weight (which is typically defined in a particular [semiring](@ref weights)):
```julia
julia> using FiniteStateTransducers
julia> W = ProbabilityWeight{Float64} # weight type
julia> A = WFST(isym,osym; W=W); # empty wfst
julia> add_arc!(A,1=>2,'h'=>'w',1); # arc from state 1 to 2 with in label 'h' and out label 'w' and weight 1
julia> add_arc!(A,2=>3,'e'=>'o',1); # arc from state 2 to 3 with in label 'e' and out label 'w' and weight 0.5
julia> add_arc!(A,3=>4,'l'=>'r',1);
julia> add_arc!(A,4=>5,'l'=>'l',1);
julia> add_arc!(A,5=>6,'o'=>'d',1);
julia> initial!(A,1); final!(A,6) # set initial and final state
WFST #states: 6, #arcs: 5, #isym: 4, #osym: 5
|1/1.0|
h:w/1.0 → (2)
(2)
e:o/1.0 → (3)
(3)
l:r/1.0 → (4)
(4)
l:l/1.0 → (5)
(5)
o:d/1.0 → (6)
((6/1.0))
```
We can now plug the input sequence `['h','e','l','l','o']` into the WFST:
```julia
julia> A(['h','e','l','l','o'])
(['w', 'o', 'r', 'l', 'd'], 1.0)
```
The input sequence is translated into `['w', 'o', 'r', 'l', 'd']` with probability `1.0`.
A sequence that cannot be accepted will return a null probability instead:
```julia
julia> A(['h','e','l','l'])
(['w', 'o', 'r', 'l'], 0.0)
```
We could modify the WFST by adding an arc with an epsilon label, which is special symbol ϵ that can be skipped (see Sec. [Epsilon label](@ref) for more info):
```julia
julia> add_arc!(A,5=>6,'ϵ'=>'d',0.001);
julia> A(['h','e','l','l'])
(['w', 'o', 'r', 'l', 'd'], 0.001)
```
Here we used a small probability for this epsilon arc and this results in a low probability of the transduced output sequence.
In fact the resulting probability of a sequence is the product of the weights of the arcs that were accessed (see [Paths](@ref)).
!!! note
The method `(transduce::WFST)(ilabels::Vector)` transduce the sequence of `ilabel` using the WFST `fst` requires the WFST to be input deterministic, see [`is_deterministic`](@ref).
See [[1]](@ref references) for a good tutorial on WFST with the focus on speech recognition.
## [References](@id references)
- [1] [Mohri, Mehryar and Pereira, Fernando and Riley, Michael, "Speech Recognition with Weighted Finite-State Transducers," Springer Handb. Speech Process. 2008](http://www.openfst.org/twiki/pub/FST/FstBackground/hbka.pdf)
| FiniteStateTransducers | https://github.com/idiap/FiniteStateTransducers.jl.git |
|
[
"MIT"
] | 0.1.0 | 42d75e0b4f7cbdc29911175ddaa61f14293c6f19 | docs | 1139 | # [Weights](@id weights)
The weights of WFSTs typically belong to particular [semirings](https://en.wikipedia.org/wiki/Semiring).
The two binary operations ``\oplus`` and ``\otimes`` are exported as `+` and `*`.
The null element ``\bar{0}`` and unity element ``\bar{1}`` can be obtained using the functions `zero(W)` and `one(W)` where `W<:Semiring`.
## Semirings
```@docs
ProbabilityWeight
LogWeight
NLogWeight
TropicalWeight
BoolWeight
LeftStringWeight
RightStringWeight
ProductWeight
```
Use `get` to extract the contained object by the semiring:
```julia
julia> w = TropicalWeight{Float32}(2.3)
2.3f0
julia> typeof(w), typeof(get(w))
(TropicalWeight{Float32}, Float32)
```
## Semiring properties
Some algorithms are only available for WFST's whose weights belong to semirings that satisfies certain properties.
A list of these properties follows:
```@docs
FiniteStateTransducers.iscommulative
FiniteStateTransducers.isleft
FiniteStateTransducers.isright
FiniteStateTransducers.isweaklydivisible
FiniteStateTransducers.ispath
FiniteStateTransducers.isidempotent
```
Notice that these functions are not exported by the package.
| FiniteStateTransducers | https://github.com/idiap/FiniteStateTransducers.jl.git |
|
[
"MIT"
] | 0.1.0 | 42d75e0b4f7cbdc29911175ddaa61f14293c6f19 | docs | 4238 | # WFSTs internals
## Formal definition
Formally a WFSTs over the semiring ``\mathbb{W}`` is the tuple ``(\mathcal{A},\mathcal{B},Q,I,F,E,\lambda,\rho)`` where:
* ``\mathcal{A}`` is the input alphabet (set of input labels)
* ``\mathcal{B}`` is the output alphabet (set of output labels)
* ``Q`` is a set of states (usually integers)
* ``I \subseteq Q`` is the set of initial states
* ``F \subseteq Q`` is the set of final states
* ``E \subseteq Q \times \mathcal{A} \cup \{ \epsilon \} \times \mathcal{B} \cup \{ \epsilon \} \times \mathbb{W} \times Q`` is a set of arcs (transitions) where an element consists of the tuple (starting state, input label, output label, weight, destination state)
* ``\lambda : I \rightarrow \mathbb{W}`` a function that maps initial states to a weight
* ``\rho : F \rightarrow \mathbb{W}`` a function that maps final states to a weight
## Constructors and modifiers
```@docs
WFST
add_arc!
add_states!
initial!
final!
rm_final!
rm_initial!
```
## Internal access
```@docs
get_isym
get_iisym
get_osym
get_iosym
get_states
get_initial
get_initial_weight
get_final
get_final_weight
get_ialphabet
get_oalphabet
get_arcs
```
## Arcs
```@docs
Arc
get_ilabel
get_olabel
get_weight
get_nextstate
```
## Paths
```@docs
Path
get_isequence
get_osequence
```
## Tour of the internals
Let's build a simple WFST and check out its internals:
```julia
julia> using FiniteStateTransducers
julia> A = WFST(["hello","world"],[:ciao,:mondo]);
julia> add_arc!(A,1=>2,"hello"=>:ciao);
julia> add_arc!(A,1=>3,"world"=>:mondo);
julia> add_arc!(A,2=>3,"world"=>:mondo);
julia> initial!(A,1);
julia> final!(A,3)
WFST #states: 3, #arcs: 3, #isym: 2, #osym: 2
|1/0.0f0|
hello:ciao/0.0f0 → (2)
world:mondo/0.0f0 → (3)
(2)
world:mondo/0.0f0 → (3)
((3/0.0f0))
```
For this simple WFST the states consists of an `Array` of `Array`s containing `Arc`:
```julia
julia> get_states(A)
3-element Array{Array{FiniteStateTransducers.Arc{TropicalWeight{Float32},Int64},1},1}:
[1:1/0.0f0 → (2), 2:2/0.0f0 → (3)]
[2:2/0.0f0 → (3)]
[]
```
As it can be seen the first state has two arcs, second state only one and the final state none.
A state can also be accessed as follows:
```julia
julia> A[2]
1-element Array{FiniteStateTransducers.Arc{TropicalWeight{Float32},Int64},1}:
2:2/0.0f0 → (3)
```
Here the arc's input/output labels are displayed as integers. We would expect `world:mondo/0.0f0 → (3)` instead of `2:2/0.0f0 → (3)`.
This is due to the fact that, contrary to the formal definition, labels are not stored directly in the arcs; instead an index into the input/output symbol table is used:
```julia
julia> get_isym(A)
Dict{String,Int64} with 2 entries:
"hello" => 1
"world" => 2
julia> get_osym(A)
Dict{Symbol,Int64} with 2 entries:
:mondo => 2
:ciao => 1
```
Another difference is in the definition of ``I``, ``F``, ``\lambda`` and ``\rho``.
These are also represented by dictionaries that can be accessed using the functions [`get_initial`](@ref) and [`get_final`](@ref).
```julia
julia> get_final(A)
Dict{Int64,TropicalWeight{Float32}} with 1 entry:
3 => 0.0
```
## Epsilon label
```@docs
iseps
get_eps
```
By default `0` indicates the epsilon label which is not present in the symbol table.
Currently the following epsilon symbols are reserved for the following types:
| Type | Label |
|:--------:|:---------:|
| `Char` | `'ϵ'` |
| `String` | `"<eps>"` |
| `Int` | `0` |
We can define a particular type by extending the functions [`iseps`](@ref) and [`get_eps`](@ref).
For example if we want to introduce an epsilon symbol for the type `Symbol`:
```
julia> FiniteStateTransducers.iseps(x::Symbol) = x == :eps;
julia> FiniteStateTransducers.get_eps(x::Type{Symbol}) = :eps;
julia> add_arc!(A,3=>3,"world"=>:eps)
WFST #states: 3, #arcs: 5, #isym: 2, #osym: 2
|1/0.0f0|
hello:ciao/0.0f0 → (2)
world:mondo/0.0f0 → (3)
(2)
world:mondo/0.0f0 → (3)
((3/0.0f0))
world:ϵ/0.0f0 → (3)
julia> A[3]
1-element Array{Arc{TropicalWeight{Float32},Int64},1}:
2:0/0.0f0 → (3)
```
## Properties
```@docs
length
size
isinitial
isfinal
typeofweight
has_eps
count_eps
is_acceptor
is_acyclic
is_deterministic
```
## Other constructors
```@docs
linearfst
matrix2wfst
```
| FiniteStateTransducers | https://github.com/idiap/FiniteStateTransducers.jl.git |
|
[
"MIT"
] | 0.1.0 | 42d75e0b4f7cbdc29911175ddaa61f14293c6f19 | docs | 593 | ## Arcmap
```@docs
arcmap
inv
proj
```
## Closure
```@docs
closure
closure!
```
## Composition
```@docs
∘
Trivial
EpsMatch
EpsSeq
```
## Concatenation
```@docs
*
```
## Connect
```@docs
connect
connect!
```
## Determinize
```@docs
determinize_fsa
```
## Epsilon Removal
```@docs
rm_eps
rm_eps!
```
## Iterators
```@docs
BFS
get_paths
collectpaths
DFS
is_visited
```
## Reverse
```@docs
reverse
```
## Shortest Distance
```@docs
shortest_distance
```
## Topological sort
```@docs
topsort
topsortperm
get_scc
```
## WFST label transition extraction
```@docs
wfst2tr
```
| FiniteStateTransducers | https://github.com/idiap/FiniteStateTransducers.jl.git |
|
[
"MIT"
] | 0.1.0 | 42d75e0b4f7cbdc29911175ddaa61f14293c6f19 | docs | 61 | # Input/output
```@docs
txt2fst
txt2sym
fst2dot
fst2pdf
```
| FiniteStateTransducers | https://github.com/idiap/FiniteStateTransducers.jl.git |
|
[
"MIT"
] | 0.1.0 | 42d75e0b4f7cbdc29911175ddaa61f14293c6f19 | docs | 995 | # FiniteStateTransducers.jl
Play with Weighted Finite State Transducers (WFSTs) using the Julia language.
WFSTs provide a powerful framework that assigns a weight (e.g. probability) to conversions of symbol sequences.
WFSTs are used in many applications such as speech recognition, natural language processing and machine learning.
This package takes a lot of inspiration from [OpenFST](http://openfst.org/twiki/bin/view/FST/DeterminizeDoc).
FiniteStateTransducers is still in an early development stage, see the documentation for currently available features and issues for the missing ones.
## Installation
To install the package, simply issue the following command in the Julia REPL:
```julia
] add FiniteStateTransducers
```
## Acknowledgements
This work was developed under
the supervision of Prof. Dr. Hervé Bourlard
and supported by the Swiss National Science Foundation
under the project
"Sparse and hierarchical Structures for Speech Modeling" (SHISSM)
(no. 200021.175589).
| FiniteStateTransducers | https://github.com/idiap/FiniteStateTransducers.jl.git |
|
[
"MPL-2.0"
] | 0.1.0 | 58ea3a2f7602427c371dd2494ba46e072332ede7 | code | 352 | using Documenter, ParametricLP
# Build the HTML documentation for ParametricLP.
makedocs(
    sitename = "ParametricLP",
    modules = [ParametricLP],
    clean = true,
    # pretty URLs only on CI, so local builds remain browsable from disk
    format = Documenter.HTML(prettyurls = get(ENV, "CI", nothing) == "true"),
    pages = ["ParametricLP" => "index.md", "API Reference" => "api.md"],
)
# Deploy to GitHub Pages; development builds are published under devurl "docs".
deploydocs(repo = "github.com/adow031/ParametricLP.jl.git", devurl = "docs")
| ParametricLP | https://github.com/adow031/ParametricLP.jl.git |
|
[
"MPL-2.0"
] | 0.1.0 | 58ea3a2f7602427c371dd2494ba46e072332ede7 | code | 1366 | ## This example is based on the model used to illustrate JuMP's sensitivity report
## https://jump.dev/JuMP.jl/stable/tutorials/linear/lp_sensitivity/
using HiGHS, Plots, ParametricLP, JuMP
# LP whose right-hand sides are driven by the two parameter variables p[1], p[2].
model = Model(HiGHS.Optimizer)
set_silent(model)
@variable(model, x >= 0)
@variable(model, 0 <= y <= 3)
@variable(model, z <= 1)
@variable(model, p[1:2])
@objective(model, Min, 12x + 20y - z)
@constraint(model, c1, 6x + 8y >= p[1])
@constraint(model, c2, 7x + 12y >= p[2])
@constraint(model, c3, x + y <= 20)
# Parameter box to explore: p[1] ∈ [0, 150], p[2] ∈ [0, 155].
box = ((0.0, 150.0), (0.0, 155.0))
regions, πs = find_regions(model, p, box)
# Shared colour limits across both subplots: extremes of the duals of c1 and c2.
clims = (minimum([min(π[c1], π[c2]) for π in πs]), maximum([max(π[c1], π[c2]) for π in πs]))
# One polygon per region, coloured by the dual of c1.
p1 = plot(
    [Shape(r) for r in regions],
    fill_z = permutedims([π[c1] for π in πs]),
    legend = false,
    colorbar = false,
    clims = clims,
    color = cgrad([:green, :yellow, :red]),
);
# Same regions, coloured by the dual of c2.
p2 = plot(
    [Shape(r) for r in regions],
    fill_z = permutedims([π[c2] for π in πs]),
    legend = false,
    colorbar = false,
    clims = clims,
    color = cgrad([:green, :yellow, :red]),
);
# Dummy scatter used as a manual colour bar for the shared scale.
h1 = scatter(
    [0, 0],
    [1, 1],
    zcolor = collect(clims),
    xlims = (1, 1.1),
    label = "",
    yshowaxis = false,
    c = cgrad([:green, :yellow, :red]),
    framestyle = :none,
);
# Two region plots plus the colour bar, side by side.
l = @layout [grid(1, 2) a{0.06w}]
plot(p1, p2, h1, layout = l, size = (940, 380), link = :all)
| ParametricLP | https://github.com/adow031/ParametricLP.jl.git |
|
[
"MPL-2.0"
] | 0.1.0 | 58ea3a2f7602427c371dd2494ba46e072332ede7 | code | 1598 | using HiGHS, Plots, ParametricLP, JuMP
"""
    get_model(line_capacity::Float64)

Build a parametric LP with two balance constraints linked by a flow variable
`f` bounded by `±line_capacity`.  `p[1]` and `p[2]` act as parametric upper
bounds on `x` and `y`.  Returns `(model, p, [nd1, nd2])`, where `nd1`/`nd2`
are the balance constraints whose duals are examined by the calling script.
"""
function get_model(line_capacity::Float64)
    model = JuMP.Model(HiGHS.Optimizer)
    set_silent(model)
    @variable(model, x >= 0)
    @variable(model, y >= 0)
    # flow between the two balance constraints, limited by the line capacity
    @variable(model, -line_capacity <= f <= line_capacity)
    # parameters: right-hand sides of the capacity constraints on x and y below
    @variable(model, p[1:2])
    # three unit-sized supply tranches per balance constraint
    @variable(model, 0 <= T[1:3, 1:2] <= 1)
    nd1 = @constraint(model, sum(T[i, 1] for i in 1:3) + x - f == 3)
    nd2 = @constraint(model, sum(T[i, 2] for i in 1:3) + y + f == 2)
    @constraint(model, x <= p[1])
    @constraint(model, y <= p[2])
    # tranche i at side j costs i + (j-1)/2, i.e. (1,2,3) and (1.5,2.5,3.5)
    @objective(model, Min, sum((i + (j - 1) / 2) * T[i, j] for i in 1:3, j in 1:2))
    return model, p, [nd1, nd2]
end
# Enumerate the dual regions for a line capacity of 0.5 over p ∈ [0,4]².
model, p, cons = get_model(0.5)
box = ((0.0, 4.0), (0.0, 4.0))
regions, πs = find_regions(model, p, box)
# Shared colour limits: extremes of the duals of the two balance constraints.
clims = (
    minimum([min(π[cons[1]], π[cons[2]]) for π in πs]),
    maximum([max(π[cons[1]], π[cons[2]]) for π in πs]),
)
# One polygon per region, coloured by the dual of the first balance constraint.
p1 = plot(
    [Shape(r) for r in regions],
    fill_z = permutedims([π[cons[1]] for π in πs]),
    legend = false,
    colorbar = false,
    clims = clims,
    color = cgrad([:grey, :yellow, :red]),
);
# Same regions, coloured by the dual of the second balance constraint.
p2 = plot(
    [Shape(r) for r in regions],
    fill_z = permutedims([π[cons[2]] for π in πs]),
    legend = false,
    colorbar = false,
    clims = clims,
    color = cgrad([:grey, :yellow, :red]),
);
# Dummy scatter used as a manual colour bar for the shared scale.
h1 = scatter(
    [0, 0],
    [1, 1],
    zcolor = collect(clims),
    xlims = (1, 1.1),
    label = "",
    yshowaxis = false,
    c = cgrad([:grey, :yellow, :red]),
    framestyle = :none,
);
# Two region plots plus the colour bar, side by side.
l = @layout [grid(1, 2) a{0.06w}]
plot(p1, p2, h1, layout = l, size = (940, 380), link = :all)
| ParametricLP | https://github.com/adow031/ParametricLP.jl.git |
|
[
"MPL-2.0"
] | 0.1.0 | 58ea3a2f7602427c371dd2494ba46e072332ede7 | code | 280 | using JuliaFormatter
# Run JuliaFormatter over the repository root.  The call is retried a bounded
# number of times; we give up after ten unsuccessful attempts.
function format_code()
    for _ in 1:10
        format(dirname(@__DIR__)) && return true
    end
    return false
end

if format_code()
    @info("Formatting complete")
else
    @info("Formatting failed")
end
| ParametricLP | https://github.com/adow031/ParametricLP.jl.git |
|
[
"MPL-2.0"
] | 0.1.0 | 58ea3a2f7602427c371dd2494ba46e072332ede7 | code | 123 | module ParametricLP
using JuMP, MathOptInterface
# Implementation is split across two source files.
include("utilities.jl")
include("regions.jl")
# Public API of the package.
export find_regions
end
| ParametricLP | https://github.com/adow031/ParametricLP.jl.git |
|
[
"MPL-2.0"
] | 0.1.0 | 58ea3a2f7602427c371dd2494ba46e072332ede7 | code | 7332 | function find_region(
model::JuMP.Model,
parameters::Vector{VariableRef},
box::Tuple{Tuple{Float64,Float64},Tuple{Float64,Float64}},
point::Tuple{Float64,Float64},
ϵ = 0.01,
)
fix(parameters[1], point[1])
fix(parameters[2], point[2])
optimize!(model)
if termination_status(model) != MOI.OPTIMAL
unfix(parameters[1])
unfix(parameters[2])
return Tuple{Float64,Float64}[], Tuple{Float64,Float64}[], Dict()
end
sfm = JuMP._standard_form_matrix(model)
sfb = JuMP._standard_form_basis(model, sfm)
cons = all_constraints(model, include_variable_in_set_constraints = false)
π = Dict(zip(cons, dual.(cons)))
reverselookup = Dict{Int,VariableRef}()
bounds = Dict{Int,Tuple{Float64,Float64}}()
newcons = ConstraintRef[]
for index in eachindex(sfm.bounds)
reverselookup[index] = JuMP.constraint_object(sfm.bounds[index]).func
end
vars = all_variables(model)
solution = Dict(zip(vars, value.(vars)))
for i in eachindex(sfb.bounds)
if sfb.bounds[i] != MathOptInterface.BASIC && reverselookup[i] ∉ parameters
var = reverselookup[i]
lb = has_lower_bound(var) ? lower_bound(var) : -Inf
ub = has_upper_bound(var) ? upper_bound(var) : Inf
bounds[i] = (lb, ub)
fix(var, solution[var], force = true)
end
end
for i in eachindex(sfb.constraints)
if sfb.constraints[i] != MathOptInterface.BASIC
constr_set = MOI.get(model, MOI.ConstraintSet(), sfm.constraints[i])
rhs = nothing
if typeof(constr_set) <: MOI.GreaterThan
rhs = constr_set.lower
elseif typeof(constr_set) <: MOI.LessThan
rhs = constr_set.upper
end
if rhs !== nothing
con = @constraint(model, 0 == rhs)
for (var, val) in JuMP.constraint_object(sfm.constraints[i]).func.terms
set_normalized_coefficient(con, var, val)
end
push!(newcons, con)
end
end
end
unfix(parameters[1])
unfix(parameters[2])
set_lower_bound(parameters[1], box[1][1])
set_upper_bound(parameters[1], box[1][2])
set_lower_bound(parameters[2], box[2][1])
set_upper_bound(parameters[2], box[2][2])
original_obj = objective_function(model)
corners = Tuple{Float64,Float64}[]
for sgn in [-1, 1]
for direction in [:pos, :neg]
nextvalue = 0.0
while true
@objective(model, Min, nextvalue * parameters[1] + sgn * parameters[2])
optimize!(model)
if termination_status(model) == MOI.OPTIMAL
push!(corners, Tuple(value.(parameters)))
report = JuMP.lp_sensitivity_report(model)
if direction == :pos && report.objective[parameters[1]][2] != Inf
nextvalue += report.objective[parameters[1]][2] + ϵ
elseif direction == :neg && report.objective[parameters[1]][1] != -Inf
nextvalue += report.objective[parameters[1]][1] - ϵ
else
break
end
else
break
end
end
end
end
unique!(fix_minus_zero, corners)
if length(corners) == 1
seeds = [
(corners[1][1] + ϵ * i[1], corners[1][2] + ϵ * i[2]) for
i in [(-1, -1), (-1, 1), (1, -1), (1, 1)]
]
else
c =
(
sum(corners[i][1] for i in eachindex(corners)),
sum(corners[i][2] for i in eachindex(corners)),
) ./ length(corners)
sort!(
corners,
lt = (x, y) -> atan(x[1] - c[1], x[2] - c[2]) < atan(y[1] - c[1], y[2] - c[2]),
)
offsets = [
(
(corners[i][2] - corners[i%length(corners)+1][2]),
(corners[i%length(corners)+1][1] - corners[i][1]),
) .* (
ϵ / sqrt(
(corners[i][1] - corners[i%length(corners)+1][1])^2 +
(corners[i%length(corners)+1][2] - corners[i][2])^2,
)
) for i in eachindex(corners)
]
seeds = [
(
corners[i][1] + corners[i%length(corners)+1][1] + 2 * offsets[i][1],
corners[i][2] + corners[i%length(corners)+1][2] + 2 * offsets[i][2],
) ./ 2 for i in eachindex(corners)
]
end
for c in newcons
delete(model, c)
end
for (index, bound) in bounds
var = reverselookup[index]
unfix(var)
if bound[1] != -Inf
set_lower_bound(var, bound[1])
end
if bound[2] != Inf
set_upper_bound(var, bound[2])
end
end
delete_lower_bound(parameters[1])
delete_upper_bound(parameters[1])
delete_lower_bound(parameters[2])
delete_upper_bound(parameters[2])
set_objective(model, MOI.MIN_SENSE, original_obj)
return corners, seeds, π
end
"""
find_regions(
model::JuMP.Model,
parameters::Vector{VariableRef},
box::Tuple{Tuple{Float64,Float64},Tuple{Float64,Float64}},
ϵ = 0.01,
)
This function finds all the optimal bases and their corresponding regions in terms of two `parameters` in a linear programming `model`.
### Required arguments
`model` is a `JuMP.Model` linear programme, defined in a way where two of the right-hand side values are replaced by variables.
`parameters` is a vector of two variables that appear on the right-hand side of the two constraints that we wish to explore parametrically.
`box` is a Tuple of Tuples, this defines the minimum and maximum value for each of the `parameters` described above.
### Optional arguments
`ϵ` is a tolerance value used when ensuring that we find an adjacent basis.
"""
function find_regions(
model::JuMP.Model,
parameters::Vector{VariableRef},
box::Tuple{Tuple{Float64,Float64},Tuple{Float64,Float64}},
ϵ = 0.01,
)
πs = []
regions = Vector{Tuple{Float64,Float64}}[]
seeds_list = [
(box[1][1], box[2][1]),
(box[1][2], box[2][1]),
(box[1][2], box[2][2]),
(box[1][1], box[2][2]),
]
while length(seeds_list) > 0
seed = pop!(seeds_list)
duplicate =
!insidePolygon(
[
(box[1][1], box[2][1]),
(box[1][2], box[2][1]),
(box[1][2], box[2][2]),
(box[1][1], box[2][2]),
],
seed,
)
if duplicate == false
for i in eachindex(regions)
if insidePolygon(regions[i], seed)
duplicate = true
break
end
end
if !duplicate
corners, seeds, π = find_region(model, parameters, box, seed, ϵ)
if length(corners) != 0
push!(regions, corners)
push!(πs, π)
seeds_list = [seeds_list; seeds]
end
end
end
end
return regions, πs
end
| ParametricLP | https://github.com/adow031/ParametricLP.jl.git |
|
[
"MPL-2.0"
] | 0.1.0 | 58ea3a2f7602427c371dd2494ba46e072332ede7 | code | 1493 | function insidePolygon(corners, point)
check_sign = 0.0
if length(corners) == 1
if point[1] != corners[1][1] || point[2] != corners[1][2]
return false
else
return true
end
end
for k in eachindex(corners)
if k == length(corners)
current_poly = (corners[1][1] - corners[k][1], corners[1][2] - corners[k][2])
else
current_poly =
(corners[k+1][1] - corners[k][1], corners[k+1][2] - corners[k][2])
end
point_vect = (point[1] - corners[k][1], point[2] - corners[k][2])
current_sign = cross_product(current_poly, point_vect)
if check_sign == 0.0
check_sign = current_sign
elseif check_sign != current_sign && current_sign != 0.0
return false
end
end
return true
end
"""
    cross_product(v1, v2)

Return the sign (`1.0`, `-1.0`, or `0.0`) of the z-component of the 2-D cross
product `v1 × v2`. Magnitudes below `1e-8` are snapped to exactly zero.
"""
function cross_product(v1::Tuple{Float64,Float64}, v2::Tuple{Float64,Float64})
    z = v1[1] * v2[2] - v1[2] * v2[1]
    # Snapping near-zero values could theoretically cause an early termination
    # of the algorithm, but in practice prevents infinite loops.
    return abs(z) < 1e-8 ? 0.0 : sign(z)
end
"""
    fix_minus_zero(x::Tuple{Float64,Float64})

Return `x` with any IEEE signed-zero component replaced by positive zero, so
coordinates compare and print consistently. (Since `-0.0 == 0.0`, each test
below matches either signed zero and rewrites it as `+0.0`.)
"""
function fix_minus_zero(x::Tuple{Float64,Float64})
    first  = x[1] == 0.0 ? 0.0 : x[1]
    second = x[2] == 0.0 ? 0.0 : x[2]
    return (first, second)
end
| ParametricLP | https://github.com/adow031/ParametricLP.jl.git |
|
[
"MPL-2.0"
] | 0.1.0 | 58ea3a2f7602427c371dd2494ba46e072332ede7 | code | 1428 | using Test, ParametricLP, JuMP, HiGHS
# Build a small test LP: three unit-capacity "tranches" T[i] with increasing
# cost i cover whatever part of the demand (5 units) is not met by x and y,
# whose upper bounds are the two parameters p[1] and p[2].
# Returns the model, the parameter variables, and the demand constraint.
function get_model()
    model = JuMP.Model(HiGHS.Optimizer)
    set_silent(model)   # suppress solver output during tests
    @variable(model, x >= 0)
    @variable(model, y >= 0)
    @variable(model, p[1:2])   # right-hand-side parameters (fixed by find_region)
    @variable(model, 0 <= T[1:3] <= 1)
    con = @constraint(model, sum(T[i] for i in 1:3) + x + y == 5)
    @constraint(model, x <= p[1])
    @constraint(model, y <= p[2])
    @objective(model, Min, sum(i * T[i] for i in 1:3))
    return model, p, con
end
# Solve the parametric LP over the box [0,4]×[0,4] once, up front; the results
# are checked in the Region Tests below.
model, p, con = get_model()
box = ((0.0, 4.0), (0.0, 4.0))
regions, πs = find_regions(model, p, box)
# Geometry helpers: point-in-polygon and sign-of-cross-product.
@testset "Utility Tests" begin
    @test ParametricLP.insidePolygon([(0.0, 0.0), (1.0, 0.0), (0.0, 1.0)], (0.25, 0.25)) ==
          true
    @test ParametricLP.insidePolygon([(0.0, 0.0), (1.0, 0.0), (0.0, 1.0)], (0.6, 0.25)) ==
          true
    @test ParametricLP.insidePolygon([(0.0, 0.0), (1.0, 0.0), (0.0, 1.0)], (0.8, 0.25)) ==
          false
    @test ParametricLP.cross_product((1.0, 0.2), (-0.5, 0.5)) == 1.0
    @test ParametricLP.cross_product((1.0, 0.2), (0.5, -0.5)) == -1.0
    @test ParametricLP.cross_product((1.0, 0.2), (0.5, 0.1)) == 0.0
end
# Check each region's dual price for the demand constraint, and a simple
# checksum over the corner coordinates of two of the regions.
@testset "Region Tests" begin
    @test length(regions) == 4
    @test πs[1][con] == 1.0
    @test πs[2][con] == 2.0
    @test πs[3][con] == 3.0
    @test πs[4][con] == 0.0
    @test sum([v[1] + v[2] for v in regions[1]]) == 18.0
    @test sum([v[1] + v[2] for v in regions[3]]) == 10.0
end
| ParametricLP | https://github.com/adow031/ParametricLP.jl.git |
|
[
"MPL-2.0"
] | 0.1.0 | 58ea3a2f7602427c371dd2494ba46e072332ede7 | docs | 1404 | | **Documentation** | **Build Status** | **Coverage** |
|:-----------------:|:--------------------:|:----------------:|
| [![][docs-latest-img]][docs-latest-url] | [![Build Status][build-img]][build-url] | [![Codecov branch][codecov-img]][codecov-url]
# ParametricLP.jl
This package utilises JuMP to define a two-dimensional parametric representation of the dual solution of a linear programme,
as a function of the two right-hand side values. If you use this package for academic work, please cite this paper:
M. Habibian et al. Co-optimization of demand response and interruptible load reserve offers for a price-making major
consumer. Energy Systems, 11, pp. 45-71, (2020).
## Example
There is an example based on a two-node electricity dispatch problem provided in the examples folder.
## Issues
This package has not yet been extensively tested. If you encounter any problems when using this package,
submit an issue.
[build-img]: https://github.com/adow031/ParametricLP.jl/workflows/CI/badge.svg?branch=main
[build-url]: https://github.com/adow031/ParametricLP.jl/actions?query=workflow%3ACI
[codecov-img]: https://codecov.io/github/adow031/ParametricLP.jl/coverage.svg?branch=main
[codecov-url]: https://codecov.io/github/adow031/ParametricLP.jl?branch=main
[docs-latest-img]: https://img.shields.io/badge/docs-latest-blue.svg
[docs-latest-url]: https://adow031.github.io/ParametricLP.jl
| ParametricLP | https://github.com/adow031/ParametricLP.jl.git |
|
[
"MPL-2.0"
] | 0.1.0 | 58ea3a2f7602427c371dd2494ba46e072332ede7 | docs | 92 | # API Reference
## ParametricLP Exported Functions
```@docs
ParametricLP.find_regions
```
| ParametricLP | https://github.com/adow031/ParametricLP.jl.git |
|
[
"MPL-2.0"
] | 0.1.0 | 58ea3a2f7602427c371dd2494ba46e072332ede7 | docs | 181 | ```@meta
CurrentModule = ParametricLP
DocTestSetup = quote
using ParametricLP
end
```
# ParametricLP.jl
## Installation
`] add https://github.com/adow031/ParametricLP.jl.git`
| ParametricLP | https://github.com/adow031/ParametricLP.jl.git |
|
[
"MIT"
] | 0.1.0 | ac648f8005b4c75583b150be2073edc67433bb0c | code | 295 | module BitVectorExtensions
export rshift, rshift!, lshift, lshift!
import Base: BitVector, copy_chunks!, get_chunks_id, _msk64, glue_src_bitchunks, _div64, _mod64, copy_chunks_rtol!
const _msk128 = ~UInt128(0)
include("constructor_unsigned.jl")
include("shifts.jl")
include("utils.jl")
end
| BitVectorExtensions | https://github.com/andrewjradcliffe/BitVectorExtensions.jl.git |
|
[
"MIT"
] | 0.1.0 | ac648f8005b4c75583b150be2073edc67433bb0c | code | 2186 | ## Constructors from Unsigned ##
# Out-of-line error helper: @noinline keeps the cold throw path out of callers.
@noinline throw_invalid_nbits(n::Integer) = throw(ArgumentError("length must be ≤ 128, got $n"))
"""
BitVector(x::Unsigned, n::Integer)
Construct a `BitVector` of length 0 ≤ `n` ≤ 128 from an unsigned integer.
If `n < 8sizeof(x)`, only the first `n` bits, numbering from the right, will be
preserved; this is equivalent to `m = 8sizeof(x) - n; BitVector(x << m >> m, n)`.
If `n > 8sizeof(x)`, the leading bit positions (reading left to right) are
zero-filled analogously to unsigned integer literals.
# Examples
```jldoctest
julia> B = BitVector(0b101, 4)
4-element BitVector:
1
0
1
0
julia> B == BitVector((true, false, true, false))
true
julia> B == BitVector(0xf5, 4)
true
julia> BitVector(0xff, 0)
0-element BitVector
julia> m = 8sizeof(0xf5) - 3
5
julia> BitVector(0xf5 << m >> m, 3) == BitVector(0xf5, 3)
true
julia> BitVector(0b110, 8sizeof(0b110) - leading_zeros(0b110))
3-element BitVector:
0
1
1
```
"""
function BitVector(x::Unsigned, n::Integer)
n ≤ 128 || throw_invalid_nbits(n)
B = BitVector(undef, n)
Bc = B.chunks
n != 0 && (Bc[1] = _msk64 >> (64 - n) & x)
n > 64 && (Bc[2] = ~_msk64)
return B
end
"""
BitVector(x::Unsigned)
Construct a `BitVector` of length `8sizeof(x)` from an unsigned integer following
the [LSB 0 bit numbering scheme](https://en.wikipedia.org/wiki/Bit_numbering)
(except 1-indexed). Leading bit positions are zero-filled analogously to unsigned
integer literals.
# Examples
```jldoctest
julia> BitVector(0b10010100)
8-element BitVector:
0
0
1
0
1
0
0
1
julia> BitVector(0b10100)
8-element BitVector:
0
0
1
0
1
0
0
0
julia> BitVector(0xff) == trues(8)
true
```
"""
# Full-width convenience method: use every bit of x (LSB of x becomes B[1]).
BitVector(x::Unsigned) = BitVector(x, 8 * sizeof(x))
# Specialized method: a UInt128 fills exactly two 64-bit chunks, so write them
# directly instead of masking through the generic path.
function BitVector(x::UInt128)
    out = BitVector(undef, 128)
    chunks = out.chunks
    chunks[1] = x % UInt64           # low 64 bits
    chunks[2] = (x >> 64) % UInt64   # high 64 bits
    return out
end
# Specialized truncating method for UInt128 sources (n may legitimately exceed
# 64 here, so the mask is computed in 128-bit arithmetic).
function BitVector(x::UInt128, n::Integer)
    n ≤ 128 || throw_invalid_nbits(n)
    out = BitVector(undef, n)
    if n != 0
        chunks = out.chunks
        kept = (_msk128 >> (128 - n)) & x   # drop bits at positions above n
        chunks[1] = kept % UInt64
        if n > 64
            chunks[2] = (kept >> 64) % UInt64
        end
    end
    return out
end
| BitVectorExtensions | https://github.com/andrewjradcliffe/BitVectorExtensions.jl.git |
|
[
"MIT"
] | 0.1.0 | ac648f8005b4c75583b150be2073edc67433bb0c | code | 5633 | # Necessary for avoiding a copy on rshift! when dest===src
# -- several of the bitshifts involving ld0 can probably be eliminated.
# Copy `numbits` bits from `src` (starting at bit position `pos_s`) into `dest`
# (starting at bit position `pos_d`), then zero-fill every chunk of `dest`
# past the copied region. Unlike Base.copy_chunks!, pre-existing bits of
# `dest` outside the copied range are NOT preserved — this implements the
# fill-with-false semantics of a shift.
function copy_chunks_rshift!(dest::Vector{UInt64}, pos_d::Int, src::Vector{UInt64}, pos_s::Int, numbits::Int)
    numbits == 0 && return
    # In-place left shift (dest === src with pos_d > pos_s): the copy must run
    # right-to-left so not-yet-copied source chunks are not clobbered.
    if dest === src && pos_d > pos_s
        return copy_chunks_rtol!(dest, pos_d, pos_s, numbits)
    end
    # (chunk index, bit offset within chunk) for the first/last bit of each range.
    kd0, ld0 = get_chunks_id(pos_d)
    kd1, ld1 = get_chunks_id(pos_d + numbits - 1)
    ks0, ls0 = get_chunks_id(pos_s)
    ks1, ls1 = get_chunks_id(pos_s + numbits - 1)
    delta_kd = kd1 - kd0
    delta_ks = ks1 - ks0
    u = _msk64
    # msk_d0/msk_d1 select the parts of the boundary destination chunks that
    # lie OUTSIDE the destination range; msk_s0 selects the part of the first
    # source chunk INSIDE the source range.
    if delta_kd == 0
        msk_d0 = ~(u << ld0) | (u << (ld1+1))
    else
        msk_d0 = ~(u << ld0)
        msk_d1 = (u << (ld1+1))
    end
    if delta_ks == 0
        msk_s0 = (u << ls0) & ~(u << (ls1+1))
    else
        msk_s0 = (u << ls0)
    end
    chunk_s0 = glue_src_bitchunks(src, ks0, ks1, msk_s0, ls0)
    # Note: old dest bits are dropped (zero-filled) rather than OR-ed back in.
    dest[kd0] = ((chunk_s0 << ld0) & ~msk_d0)
    if delta_kd == 0
        # Single destination chunk: everything after it becomes zero.
        for i = kd0+1:length(dest)
            dest[i] = ~_msk64
        end
        return
    end
    # Interior destination chunks: stitch together two consecutive src chunks.
    for i = 1 : kd1 - kd0 - 1
        chunk_s1 = glue_src_bitchunks(src, ks0 + i, ks1, msk_s0, ls0)
        chunk_s = (chunk_s0 >>> (64 - ld0)) | (chunk_s1 << ld0)
        dest[kd0 + i] = chunk_s
        chunk_s0 = chunk_s1
    end
    # Last destination chunk (may need one more source chunk, if any remain).
    if ks1 >= ks0 + delta_kd
        chunk_s1 = glue_src_bitchunks(src, ks0 + delta_kd, ks1, msk_s0, ls0)
    else
        chunk_s1 = UInt64(0)
    end
    chunk_s = (chunk_s0 >>> (64 - ld0)) | (chunk_s1 << ld0)
    dest[kd1] = (chunk_s & ~msk_d1)
    # Zero-fill everything past the destination range (~_msk64 == 0).
    for i = kd1+1:length(dest)
        dest[i] = ~_msk64
    end
    return
end
# Zero out bit positions 1:i of the bit-chunk vector `dest`. Used after a
# left shift to clear the vacated low positions.
@inline function _msk_rtol!(dest::Vector{UInt64}, i::Int)
    kd1, ld1 = get_chunks_id(i)
    dest[kd1] &= (_msk64 << (ld1+1))   # keep only bits above position i in its chunk
    for k = kd1-1:-1:1
        dest[k] = ~_msk64              # ~_msk64 == 0: clear all lower chunks
    end
    return
end
# Core implementation: shift the bits of `src` by `i` positions (i > 0 right,
# i < 0 left), zero-filling vacated positions, and store the result in `dest`.
function rshift!(dest::BitVector, src::BitVector, i::Int)
    length(dest) == length(src) || throw(ArgumentError("destination and source should be of same size"))
    n = length(dest)
    # Shifting by n or more positions clears every bit.
    abs(i) < n || return fill!(dest, false)
    i == 0 && return (src === dest ? src : copyto!(dest, src))
    if i > 0 # right
        copy_chunks_rshift!(dest.chunks, 1, src.chunks, i+1, n-i)
    else # left
        i = -i
        copy_chunks_rshift!(dest.chunks, i+1, src.chunks, 1, n-i)
        # A left shift leaves the low i positions vacated; zero them explicitly.
        _msk_rtol!(dest.chunks, i)
    end
    return dest
end
"""
rshift!(dest::BitVector, src::BitVector, i::Integer)
Shift the elements of `src` right by `n` bit positions, filling with `false` values,
storing the result in `dest`. If `n < 0`, elements are shifted to the left.
See also: [`rshift`](@ref), [`lshift!`](@ref)
# Examples
```jldoctest
julia> B = BitVector((false, false, false));
julia> rshift!(B, BitVector((true, true, true,)), 2)
3-element BitVector:
1
0
0
julia> rshift!(B, BitVector((true, true, true,)), -2)
3-element BitVector:
0
0
1
```
"""
rshift!(dest::BitVector, src::BitVector, i::Integer) = rshift!(dest, src, Int(i))
"""
rshift!(B::BitVector, i::Integer)
Shift the elements of `B` right by `n` bit positions, filling with `false` values.
If `n < 0`, elements are shifted to the left.
# Examples
```jldoctest
julia> B = BitVector((true, true, true));
julia> rshift!(B, 2)
3-element BitVector:
1
0
0
julia> rshift!(B, 1)
3-element BitVector:
0
0
0
```
"""
rshift!(B::BitVector, i::Integer) = rshift!(B, B, i)
# Same as (<<)(B::BitVector, i::UInt)
"""
    rshift(B::BitVector, i::Integer)

Return `B` with the elements shifted right by `i` bit positions, filling with
`false` values. If `i < 0`, elements are shifted to the left.

See also: [`rshift!`](@ref), [`lshift`](@ref)

# Examples
```jldoctest
julia> B = BitVector([true, false, true, false, false])
5-element BitVector:
 1
 0
 1
 0
 0

julia> rshift(B, 1) == B << 1 # Notice opposite behavior
true

julia> rshift(B, 2)
5-element BitVector:
 1
 0
 0
 0
 0
```
"""
rshift(B::BitVector, i::Integer) = rshift!(similar(B), B, i)
"""
lshift!(dest::BitVector, src::BitVector, i::Integer)
Shift the elements of `src` left by `n` bit positions, filling with `false` values,
storing the result in `dest`. If `n < 0`, elements are shifted to the right.
See also: [`lshift`](@ref), [`rshift!`](@ref)
# Examples
```jldoctest
julia> B = BitVector((false, false, false));
julia> lshift!(B, BitVector((true, true, true,)), 2)
3-element BitVector:
0
0
1
julia> lshift!(B, BitVector((true, true, true,)), -2)
3-element BitVector:
1
0
0
```
"""
lshift!(dest::BitVector, src::BitVector, i::Integer) = rshift!(dest, src, -i)
lshift!(dest::BitVector, src::BitVector, i::Unsigned) = rshift!(dest, src, -Int(i))
"""
lshift!(B::BitVector, i::Integer)
Shift the elements of `B` left by `n` bit positions, filling with `false` values.
If `n < 0`, elements are shifted to the right.
# Examples
```jldoctest
julia> B = BitVector((true, true, true));
julia> lshift!(B, 2)
3-element BitVector:
0
0
1
julia> lshift!(B, 1)
3-element BitVector:
0
0
0
"""
lshift!(B::BitVector, i::Integer) = lshift!(B, B, i)
# Same as (>>>)(B::BitVector, i::UInt)
"""
    lshift(B::BitVector, i::Integer)

Return `B` with the elements shifted left by `i` bit positions, filling with
`false` values. If `i < 0`, elements are shifted to the right.

See also: [`rshift`](@ref)

# Examples
```jldoctest
julia> B = BitVector([true, false, true, false, false])
5-element BitVector:
 1
 0
 1
 0
 0

julia> lshift(B, 1) == B >> 1 # Notice opposite behavior
true

julia> lshift(B, 2)
5-element BitVector:
 0
 0
 1
 0
 1
```
"""
lshift(B::BitVector, i::Integer) = lshift!(similar(B), B, i)
| BitVectorExtensions | https://github.com/andrewjradcliffe/BitVectorExtensions.jl.git |
|
[
"MIT"
] | 0.1.0 | ac648f8005b4c75583b150be2073edc67433bb0c | code | 284 | function bitonehot(I::Vector{Int}, n::Int)
b = falses(n)
for i ∈ I
b[i] = true
end
b
end
# Default length: just big enough to hold the largest index in I.
bitonehot(I::Vector{Int}) = bitonehot(I, maximum(I))
"""
    sum_to_int(b::BitVector)

Interpret `b` as a little-endian binary number (`b[1]` is the least significant
bit) and return its value as an `Int`. NOTE(review): the result overflows `Int`
once set bits appear above position 63 — confirm callers stay below that.
"""
function sum_to_int(b::BitVector)
    return sum(b[i] * 2^(i - 1) for i in eachindex(b); init = 0)
end
| BitVectorExtensions | https://github.com/andrewjradcliffe/BitVectorExtensions.jl.git |
|
[
"MIT"
] | 0.1.0 | ac648f8005b4c75583b150be2073edc67433bb0c | code | 1683 | @testset "construct BitVector from Unsigned" begin
# truncation
for u in (0x12, 0x1234, 0x12345678, 0x123456789abcdef, 0x0f1e2d3c4b5a69780123456789abcdef)
sz = 8sizeof(u)
for n = 0:sz
m = sz - n
@test BitVector(u, n) == BitVector(u << m >> m, n)
end
end
# (bit)reverse
for u in (0x12, 0x1234, 0x12345678, 0x123456789abcdef, 0x0f1e2d3c4b5a69780123456789abcdef)
for T in (UInt8, UInt16, UInt32, UInt64)
t = u % T
b = BitVector(t)
rb = reverse(b)
@test rb.chunks[1] == bitreverse(t)
end
end
b = BitVector(UInt128(0xf5))
rb = reverse(b)
@test rb.chunks[1] == 0x0000000000000000
@test rb.chunks[2] == bitreverse(UInt128(0xf5)) >> 64 % UInt64
# there and back again
is = [1,5,32]
@test bitonehot(is) == BitVector(0x80000011)
is = [1,4,6,8]
@test bitonehot(is) == BitVector(0xa9)
# rotation -- somewhat excessive since circshift is already the same as bitrotate.
# (it's correct by construction, but a desirable property)
for u in (0x12, 0x1234, 0x12345678, 0x123456789abcdef, 0x0f1e2d3c4b5a69780123456789abcdef)
b = BitVector(u)
sz = 8sizeof(u)
for n = -sz:sz
@test circshift(b, n) == BitVector(bitrotate(u, n))
end
end
# other properties
for u in (0xf5, 0xf0, 0x0f, 0x10, 0x01, 0x12, 0x1234, 0x12345678, 0x123456789abcdef, 0x0f1e2d3c4b5a69780123456789abcdef)
B = BitVector(u)
@test sum(B) == count_ones(u)
@test findfirst(B) == trailing_zeros(u) + 1
@test findlast(B) == 8sizeof(u) - leading_zeros(u)
end
end
| BitVectorExtensions | https://github.com/andrewjradcliffe/BitVectorExtensions.jl.git |
|
[
"MIT"
] | 0.1.0 | ac648f8005b4c75583b150be2073edc67433bb0c | code | 196 | using BitVectorExtensions
using Test
using BitVectorExtensions: bitonehot
using Random
@testset "BitVectorExtensions.jl" begin
include("constructor_unsigned.jl")
include("shifts.jl")
end
| BitVectorExtensions | https://github.com/andrewjradcliffe/BitVectorExtensions.jl.git |
|
[
"MIT"
] | 0.1.0 | ac648f8005b4c75583b150be2073edc67433bb0c | code | 5111 | @testset "BitVector l/r-shift[!] tests" begin
b = BitVector(0x123456789abcdef)
for i = -65:65
r = rshift(b, i)
@test r.chunks[1] == 0x123456789abcdef >> i
l = lshift(b, i)
@test l.chunks[1] == 0x123456789abcdef << i
# test vs. extant behavior of shift operators
@test r == b << i
@test l == b >> i
end
b = BitVector(0x0f1e2d3c4b5a69780123456789abcdef)
for i = -129:129
r = rshift(b, i)
@test r.chunks[1] == 0x0f1e2d3c4b5a69780123456789abcdef >> i % UInt64
@test r.chunks[2] == 0x0f1e2d3c4b5a69780123456789abcdef >> i >> 64 % UInt64
l = lshift(b, i)
@test l.chunks[1] == 0x0f1e2d3c4b5a69780123456789abcdef << i % UInt64
@test l.chunks[2] == 0x0f1e2d3c4b5a69780123456789abcdef << i >> 64 % UInt64
# test vs. extant behavior of shift operators
@test r == b << i
@test l == b >> i
end
# With truncation
@test rshift(BitVector(0x123456789abcdef, 20), 10) == BitVector(0xbcdef >> 10, 20)
@test lshift(BitVector(0x123456789abcdef, 20), 10) == BitVector(0xbcdef << 10, 20)
# Mutation: into self and into another dest
b = BitVector(0xffffffffffffffff)
@test_throws ArgumentError rshift!(b, falses(3), 2)
@test_throws ArgumentError lshift!(b, falses(3), 2)
for n in (64, 65, 79, 127, 128, 129, 158, 191, 192, 193, 1023, 1024, 1025)
b = trues(n)
t = trues(n)
c = similar(b)
# push bits off one by one
for i = n-1:-1:0
rshift!(b, 1)
rshift!(c, t, n-i)
@test sum(b) == i == sum(c)
end
# place bit at right-most, then walk it to left-most, then back again
b .= false
b[1] = true
for r = (2:1:n, -(n-1):1:-1)
for i = r
rshift!(b, signbit(i) ? 1 : -1)
@test b[abs(i)]
end
end
rshift!(b, t, 0)
@test all(b)
@test b.chunks !== t.chunks
for i in (1, 2, 3, 4, 5, 17, 24, 37, 63, 64)
rshift!(b, t, 0)
rshift!(b, i)
rshift!(c, t, i)
@test sum(b) == n - i == sum(c)
end
for i = n-2:n
rshift!(b, t, 0)
rshift!(b, i)
rshift!(c, t, i)
@test sum(b) == n - i == sum(c)
end
for i = 64:64:n-1
rshift!(b, t, 0)
rshift!(b, i)
rshift!(c, t, i)
@test sum(b) == n - i == sum(c)
rshift!(b, t, 0)
rshift!(b, i - 1)
rshift!(c, t, i - 1)
@test sum(b) == n - (i - 1) == sum(c)
rshift!(b, t, 0)
rshift!(b, i + 1)
rshift!(c, t, i + 1)
@test sum(b) == n - (i + 1) == sum(c)
end
#
b = bitrand(n)
t = deepcopy(b)
rshift!(b, t, 0)
@test b.chunks !== t.chunks
for i in (1, 2, 3, 4, 5, 17, 24, 37, 63, 64, n, n + 1)
rshift!(b, t, 0)
rshift!(b, i)
rshift!(c, t, i)
@test sum(b) == sum(t[i+1:n]) == sum(c)
@test b == rshift(t, i) == rshift(t, i)
end
end
for n in (64, 65, 79, 127, 128, 129, 158, 191, 192, 193, 1023, 1024, 1025)
b = trues(n)
t = trues(n)
c = similar(b)
# push bits off one by one
for i = n-1:-1:0
lshift!(b, 1)
lshift!(c, t, n-i)
@test sum(b) == i == sum(c)
end
# place bit at left-most, then walk it to right-most, then back again
b .= false
b[end] = true
for r = (-(n-1):1:-1, 2:1:n)
for i = r
lshift!(b, signbit(i) ? -1 : 1)
@test b[abs(i)]
end
end
lshift!(b, t, 0)
@test all(b)
@test b.chunks !== t.chunks
for i in (1, 2, 3, 4, 5, 17, 24, 37, 63, 64)
lshift!(b, t, 0)
lshift!(b, i)
lshift!(c, t, i)
@test sum(b) == n - i == sum(c)
end
for i = n-2:n
lshift!(b, t, 0)
lshift!(b, i)
lshift!(c, t, i)
@test sum(b) == n - i == sum(c)
end
for i = 64:64:n-1
lshift!(b, t, 0)
lshift!(b, i)
lshift!(c, t, i)
@test sum(b) == n - i == sum(c)
lshift!(b, t, 0)
lshift!(b, i - 1)
lshift!(c, t, i - 1)
@test sum(b) == n - (i - 1) == sum(c)
lshift!(b, t, 0)
lshift!(b, i + 1)
lshift!(c, t, i + 1)
@test sum(b) == n - (i + 1) == sum(c)
end
#
b = bitrand(n)
t = deepcopy(b)
lshift!(b, t, 0)
@test b.chunks !== t.chunks
for i in (1, 2, 3, 4, 5, 17, 24, 37, 63, 64, n, n + 1)
lshift!(b, t, 0)
lshift!(b, i)
lshift!(c, t, i)
@test sum(b) == sum(t[1:n-i]) == sum(c)
@test b == (t >> i)
@test b == lshift(t, i) == lshift(t, i)
end
end
end
| BitVectorExtensions | https://github.com/andrewjradcliffe/BitVectorExtensions.jl.git |
|
[
"MIT"
] | 0.1.0 | ac648f8005b4c75583b150be2073edc67433bb0c | docs | 975 | # BitVectorExtensions
## Installation
```julia
using Pkg
Pkg.add("BitVectorExtensions")
```
## Description
This package provides the functionality of
[JuliaLang/julia#45728](https://github.com/JuliaLang/julia/pull/45728) as a
standalone package. The constructor from `Unsigned` exhibits type piracy
(it adds methods to `Base.BitVector`), so beware.
Preface: there are two distinct concepts here -- a constructor for
`BitVector` from unsigned integers, and `l/r-shift[!]` methods which
match the corresponding shifts on bit indices. These methods may be
useful if you find the motivation (or need) to work with raw bits,
then later wish to use these raw bits as indices, in which case the
most natural abstraction is a `BitVector`.
Admittedly a niche application (from the perspective of those
fortunate souls not forced into bit-twiddling by pure efficiency
concerns), but why roll your own abstraction over raw bits when `Base`
provides such a rich interface with well-tested methods? Or, perhaps
someone seeking to store the contents of a `BitVector` in a text
format.
| BitVectorExtensions | https://github.com/andrewjradcliffe/BitVectorExtensions.jl.git |
|
[
"MIT"
] | 0.5.1 | 15b96cd9e90eaa8d256266aed59fd13fd1c8f181 | code | 327 | # Copied this from Documenter.jl
# Only run coverage from linux nightly build on travis.
# (Each guard exits the script early on any other CI configuration.)
get(ENV, "TRAVIS_OS_NAME", "") == "linux" || exit()
get(ENV, "TRAVIS_JULIA_VERSION", "") == "nightly" || exit()
using Coverage
# Process coverage files from the package root and upload them to Codecov.
cd(joinpath(dirname(@__FILE__), "..")) do
    Codecov.submit(Codecov.process_folder())
end
|
[
"MIT"
] | 0.5.1 | 15b96cd9e90eaa8d256266aed59fd13fd1c8f181 | code | 504 | push!(LOAD_PATH,"../src/")
using Autologistic
using Graphs, DataFrames, CSV, Plots
using Documenter
# Make `using Autologistic` implicit inside every doctest block.
DocMeta.setdocmeta!(Autologistic, :DocTestSetup, :(using Autologistic); recursive=true)
# Build the documentation with the page order given below.
makedocs(
    sitename = "Autologistic.jl",
    modules = [Autologistic],
    pages = [
        "index.md",
        "Background.md",
        "Design.md",
        "BasicUsage.md",
        "Examples.md",
        "api.md"
    ]
)
# Push the built docs to the repository's gh-pages branch.
deploydocs(
    repo = "github.com/kramsretlow/Autologistic.jl.git",
)
|
[
"MIT"
] | 0.5.1 | 15b96cd9e90eaa8d256266aed59fd13fd1c8f181 | code | 5562 | """
ALRsimple
An autologistic regression model with "simple smoothing": the unary parameter is of type
`LinPredUnary`, and the pairwise parameter is of type `SimplePairwise`.
# Constructors
ALRsimple(unary::LinPredUnary, pairwise::SimplePairwise;
Y::Union{Nothing,<:VecOrMat} = nothing,
centering::CenteringKinds = none,
coding::Tuple{Real,Real} = (-1,1),
labels::Tuple{String,String} = ("low","high"),
coordinates::SpatialCoordinates = [(0.0,0.0) for i=1:size(unary,1)]
)
ALRsimple(graph::SimpleGraph{Int}, X::Float2D3D;
Y::VecOrMat = Array{Bool,2}(undef,nv(graph),size(X,3)),
β::Vector{Float64} = zeros(size(X,2)),
λ::Float64 = 0.0,
centering::CenteringKinds = none,
coding::Tuple{Real,Real} = (-1,1),
labels::Tuple{String,String} = ("low","high"),
coordinates::SpatialCoordinates = [(0.0,0.0) for i=1:nv(graph)]
)
# Arguments
- `Y`: the array of dichotomous responses. Any array with 2 unique values will work.
If the array has only one unique value, it must equal one of the coding values. The
supplied object will be internally represented as a Boolean array.
- `β`: the regression coefficients.
- `λ`: the association parameter.
- `centering`: controls what form of centering to use.
- `coding`: determines the numeric coding of the dichotomous responses.
- `labels`: a 2-tuple of text labels describing the meaning of `Y`. The first element
is the label corresponding to the lower coding value.
- `coordinates`: an array of 2- or 3-tuples giving spatial coordinates of each vertex in
the graph.
# Examples
```jldoctest
julia> using Graphs
julia> X = rand(10,3); #-predictors
julia> Y = rand([-2, 3], 10); #-responses
julia> g = Graph(10,20); #-graph
julia> u = LinPredUnary(X);
julia> p = SimplePairwise(g);
julia> model1 = ALRsimple(u, p, Y=Y);
julia> model2 = ALRsimple(g, X, Y=Y);
julia> all([getfield(model1, fn)==getfield(model2, fn) for fn in fieldnames(ALRsimple)])
true
```
"""
# Concrete model type: dichotomous responses with a linear-predictor unary
# part (LinPredUnary) and a single-parameter pairwise part (SimplePairwise).
mutable struct ALRsimple{C<:CenteringKinds,
                         R<:Real,
                         S<:SpatialCoordinates} <: AbstractAutologisticModel
    responses::Array{Bool,2}          # dichotomous responses as a Bool array
    unary::LinPredUnary               # unary (regression) part
    pairwise::SimplePairwise          # pairwise (association) part
    centering::C                      # which form of centering to use
    coding::Tuple{R,R}                # numeric coding (low, high) of the responses
    labels::Tuple{String,String}      # text labels for (low, high)
    coordinates::S                    # spatial coordinates of the graph vertices
    # Inner constructor: enforce invariants shared by all outer constructors.
    function ALRsimple(y, u, p, c::C, cod::Tuple{R,R}, lab, coords::S) where {C,R,S}
        if !(size(y) == size(u) == size(p)[[1,3]])
            error("ALRsimple: inconsistent sizes of Y, unary, and pairwise")
        end
        if cod[1] >= cod[2]
            error("ALRsimple: must have coding[1] < coding[2]")
        end
        if lab[1] == lab[2]
            error("ALRsimple: labels must be different")
        end
        new{C,R,S}(y,u,p,c,cod,lab,coords)
    end
end
# === Constructors =============================================================
# Construct from pre-constructed unary and pairwise types.
# Outer constructor from pre-built unary and pairwise parts.
# If no responses Y are given, an uninitialized Bool array of the right shape
# is allocated; otherwise the supplied responses are recoded as Booleans.
function ALRsimple(unary::LinPredUnary, pairwise::SimplePairwise;
                   Y::Union{Nothing,<:VecOrMat}=nothing,
                   centering::CenteringKinds=none,
                   coding::Tuple{Real,Real}=(-1,1),
                   labels::Tuple{String,String}=("low","high"),
                   coordinates::SpatialCoordinates=[(0.0,0.0) for i=1:size(unary,1)])
    (n, m) = size(unary)
    if Y === nothing   # `===` (not `==`) is the idiomatic nothing-check
        Y = Array{Bool,2}(undef, n, m)
    else
        Y = makebool(Y, coding)
    end
    return ALRsimple(Y, unary, pairwise, centering, coding, labels, coordinates)
end
# Construct from a graph and an X matrix.
function ALRsimple(graph::SimpleGraph{Int}, X::Float2D3D;
Y::VecOrMat=Array{Bool,2}(undef,nv(graph),size(X,3)),
β::Vector{Float64}=zeros(size(X,2)),
λ::Float64=0.0, centering::CenteringKinds=none,
coding::Tuple{Real,Real}=(-1,1),
labels::Tuple{String,String}=("low","high"),
coordinates::SpatialCoordinates=[(0.0,0.0) for i=1:nv(graph)])
u = LinPredUnary(X, β)
p = SimplePairwise(λ, graph, size(X,3))
return ALRsimple(makebool(Y,coding),u,p,centering,coding,labels,coordinates)
end
# ==============================================================================
# === show methods =============================================================
# Multi-line REPL display (text/plain MIME) for ALRsimple models.
function show(io::IO, ::MIME"text/plain", m::ALRsimple)
    print(io, "Autologistic regression model of type ALRsimple with parameter vector [β; λ].\n",
          "Fields:\n",
          showfields(m,2))
end
# One line per field, with the name column padded so descriptions line up.
# Nested unary/pairwise fields are indented further so they align under their
# parent's description.
function showfields(m::ALRsimple, leadspaces=0)
    spc = repeat(" ", leadspaces)
    return spc * "responses    $(size2string(m.responses)) Bool array\n" *
           spc * "unary        $(size2string(m.unary)) LinPredUnary with fields:\n" *
           showfields(m.unary, leadspaces+15) *
           spc * "pairwise     $(size2string(m.pairwise)) SimplePairwise with fields:\n" *
           showfields(m.pairwise, leadspaces+15) *
           spc * "centering    $(m.centering)\n" *
           spc * "coding       $(m.coding)\n" *
           spc * "labels       $(m.labels)\n" *
           spc * "coordinates  $(size2string(m.coordinates)) vector of $(eltype(m.coordinates))\n"
end
# ==============================================================================
| Autologistic | https://github.com/kramsretlow/Autologistic.jl.git |
|
[
"MIT"
] | 0.5.1 | 15b96cd9e90eaa8d256266aed59fd13fd1c8f181 | code | 11957 | """
ALfit
A type to hold estimation output for autologistic models. Fitting functions return an
object of this type.
Depending on the fitting method, some fields might not be set. Fields that are not used
are set to `nothing` or to zero-dimensional arrays. The fields are:
* `estimate`: A vector of parameter estimates.
* `se`: A vector of standard errors for the estimates.
* `pvalues`: A vector of p-values for testing the null hypothesis that the parameters equal
zero (one-at-a time hypothesis tests).
* `CIs`: A vector of 95% confidence intervals for the parameters (a vector of 2-tuples).
* `optim`: the output of the call to `optimize` used to get the estimates.
* `Hinv` (used by `fit_ml!`): The inverse of the Hessian matrix of the objective function,
evaluated at the estimate.
* `nboot` (`fit_pl!`): number of bootstrap samples to use for error estimation.
* `kwargs` (`fit_pl!`): holds extra keyword arguments passed in the call to the fitting
function.
* `bootsamples` (`fit_pl!`): the bootstrap samples.
* `bootestimates` (`fit_pl!`): the bootstrap parameter estimates.
* `convergence`: either a Boolean indicating optimization convergence ( for `fit_ml!`), or
a vector of such values for the optimizations done to estimate bootstrap replicates.
The empty constructor `ALfit()` will initialize an object with all fields empty, so the
needed fields can be filled afterwards.
"""
# Estimation-output container; unused fields are left empty or `nothing`
# depending on the fitting method (see the docstring above).
mutable struct ALfit
    estimate::Vector{Float64}              # parameter estimates
    se::Vector{Float64}                    # standard errors
    pvalues::Vector{Float64}               # p-values for one-at-a-time tests of zero
    CIs::Vector{Tuple{Float64,Float64}}    # 95% confidence intervals (2-tuples)
    optim                                  # output of the call to optimize
    Hinv::Array{Float64,2}                 # inverse Hessian at the estimate (fit_ml!)
    nboot::Int                             # number of bootstrap samples (fit_pl!)
    kwargs                                 # extra keyword arguments (fit_pl!)
    bootsamples                            # bootstrap samples (fit_pl!)
    bootestimates                          # bootstrap parameter estimates (fit_pl!)
    convergence                            # Bool, or vector of Bools for bootstrap fits
end
# Zero-argument constructor: produce an ALfit with every field empty or
# `nothing`, to be filled in piecemeal by the fitting routines.
function ALfit()
    return ALfit(
        Float64[],
        Float64[],
        Float64[],
        Tuple{Float64,Float64}[],
        nothing,
        Matrix{Float64}(undef, 0, 0),
        0,
        nothing,
        nothing,
        nothing,
        nothing,
    )
end
# === show methods =============================================================
# Compact single-line form (used e.g. inside containers).
show(io::IO, f::ALfit) = print(io, "ALfit")
# Multi-line REPL display: list non-empty fields plus usage hints.
function show(io::IO, ::MIME"text/plain", f::ALfit)
    print(io, "Autologistic model fitting results. Its non-empty fields are:\n",
          showfields(f,2), "Use summary(fit; [parnames, sigdigits]) to see a table of estimates.\n",
          "For pseudolikelihood, use oneboot() and addboot!() to add bootstrap after the fact.")
end
# Compose a description of the non-empty fields of `f`, one per line, each line
# indented by `leadspaces` spaces. Field names are padded to a common column so
# the descriptions line up. Used by the text/plain show method.
function showfields(f::ALfit, leadspaces=0)
    spc = repeat(" ", leadspaces)
    out = ""
    if length(f.estimate) > 0
        out *= spc * "estimate       " *
               "$(size2string(f.estimate)) vector of parameter estimates\n"
    end
    if length(f.se) > 0
        out *= spc * "se             " *
               "$(size2string(f.se)) vector of standard errors\n"
    end
    if length(f.pvalues) > 0
        out *= spc * "pvalues        " *
               "$(size2string(f.pvalues)) vector of 2-sided p-values\n"
    end
    if length(f.CIs) > 0
        out *= spc * "CIs            " *
               "$(size2string(f.CIs)) vector of 95% confidence intervals (as tuples)\n"
    end
    if f.optim !== nothing
        out *= spc * "optim          " *
               "the output of the call to optimize()\n"
    end
    if length(f.Hinv) > 0
        out *= spc * "Hinv           " *
               "the inverse of the Hessian, evaluated at the optimum\n"
    end
    if f.nboot > 0
        out *= spc * "nboot          " *
               "the number of bootstrap replicates drawn\n"
    end
    if f.kwargs !== nothing
        out *= spc * "kwargs         " *
               "extra keyword arguments passed to sample()\n"
    end
    if f.bootsamples !== nothing
        out *= spc * "bootsamples    " *
               "$(size2string(f.bootsamples)) array of bootstrap replicates\n"
    end
    if f.bootestimates !== nothing
        out *= spc * "bootestimates  " *
               "$(size2string(f.bootestimates)) array of bootstrap estimates\n"
    end
    if f.convergence !== nothing
        # convergence may be a scalar Bool (ML fit) or a vector of flags (bootstrap).
        if length(f.convergence) == 1
            out *= spc * "convergence    " *
                   "$(f.convergence)\n"
        else
            out *= spc * "convergence    " *
                   "$(size2string(f.convergence)) vector of convergence flags " *
                   "($(sum(f.convergence .== false)) false)\n"
        end
    end
    if out == ""
        out = spc * "(all fields empty)\n"
    end
    return out
end
# ==============================================================================
# Line up all strings in rows 2:end of column `col` of String matrix `S`, so that
# the first occurrence of `char` (e.g. decimal point or comma) aligns. Alignment is
# done by prepending spaces; row 1 (the header) is left untouched. After processing,
# text lines up but strings still might not all be the same length.
# (Fixed: use isnothing() instead of the `== nothing` anti-pattern.)
function align!(S, col, char)
    nrow = size(S, 1)
    locs = findfirst.(isequal(char), S[2:nrow, col])
    if all(isnothing, locs)
        # No row contains `char`: just right-justify to a common length.
        lengths = length.(S[2:nrow, col])
        maxlength = maximum(lengths)
        for i = 2:nrow
            S[i, col] = repeat(" ", maxlength - lengths[i-1]) * S[i, col]
        end
        return
    end
    # Otherwise, align the characters. If some rows lack the character, pad relative
    # to the longest string so nothing can overflow; those rows are left unchanged.
    maxloc = any(isnothing, locs) ? maximum(length.(S[2:nrow, col])) : maximum(locs)
    for i = 2:nrow
        if isnothing(locs[i-1])
            continue
        end
        S[i, col] = repeat(" ", maxloc - locs[i-1]) * S[i, col]
    end
end
# Print a table of estimates (with se/p-values/CIs where available) to `io`.
# Fixed idiom issues: `!= nothing`/`== nothing` replaced with `!==`/`===`, and the
# identity comparison `length(parnames) !== npar` replaced with a value comparison.
function summary(io::IO, f::ALfit; parnames=nothing, sigdigits=3)
    npar = length(f.estimate)
    if npar == 0
        println(io, "No estimates to tabulate")
        return
    end
    if parnames !== nothing && length(parnames) != npar
        error("parnames vector is not the correct length")
    end
    # Create the matrix of strings, and add header row and "p-values" and "CIs"
    # columns (the "p-value" column is only included for ML estimates, i.e. when
    # no bootstrap estimates are present).
    if f.bootestimates === nothing
        out = Matrix{String}(undef, npar+1, 5)
        out[1,:] = ["name", "est", "se", "p-value", "95% CI"]
        out[2:npar+1, 4] = length(f.pvalues)==0 ? ["" for i=1:npar] :
                           string.(round.(f.pvalues, sigdigits=sigdigits))
        out[2:npar+1, 5] = length(f.CIs)==0 ? ["" for i=1:npar] :
                           [string(round.((f.CIs[i][1], f.CIs[i][2]), sigdigits=sigdigits)) for i=1:npar]
        align!(out, 4, '.')
        align!(out, 5, ',')
    else
        out = Matrix{String}(undef, npar+1, 4)
        out[1,:] = ["name", "est", "se", "95% CI"]
        out[2:npar+1, 4] = length(f.CIs)==0 ? ["" for i=1:npar] :
                           [string(round.((f.CIs[i][1], f.CIs[i][2]), sigdigits=sigdigits)) for i=1:npar]
        align!(out, 4, ',')
    end
    # Fill in the name, estimate, and standard error columns.
    for i = 2:npar+1
        out[i,1] = parnames === nothing ? "parameter $(i-1)" : parnames[i-1]
    end
    out[2:npar+1, 2] = string.(round.(f.estimate, sigdigits=sigdigits))
    out[2:npar+1, 3] = length(f.se)==0 ? ["" for i=1:npar] :
                       string.(round.(f.se, sigdigits=sigdigits))
    align!(out, 2, '.')
    align!(out, 3, '.')
    # Print the table, padding each column to a common width.
    nrow, ncol = size(out)
    colwidths = [maximum(length.(out[:,i])) for i=1:ncol]
    for i = 1:nrow
        for j = 1:ncol
            print(io, out[i,j], repeat(" ", colwidths[j] - length(out[i,j])))
            print(io, j < ncol ? "  " : "\n")
        end
    end
end
# Convenience method: write the summary table to stdout.
summary(f::ALfit; parnames=nothing, sigdigits=3) =
    summary(stdout, f; parnames=parnames, sigdigits=sigdigits)
"""
addboot!(fit::ALfit, bootsamples::Array{Float64,3},
bootestimates::Array{Float64,2}, convergence::Vector{Bool})
Add parametric bootstrap information in arrays `bootsamples`, `bootestimates`, and
`convergence` to model fitting information `fit`. If `fit` already contains bootstrap
data, the new data is appended to the existing data, and statistics are recomputed.
# Examples
```jldoctest
julia> using Random;
julia> Random.seed!(1234);
julia> G = makegrid4(4,3).G;
julia> Y=[[fill(-1,4); fill(1,8)] [fill(-1,3); fill(1,9)] [fill(-1,5); fill(1,7)]];
julia> model = ALRsimple(G, ones(12,1,3), Y=Y);
julia> fit = fit_pl!(model, start=[-0.4, 1.1]);
julia> samps = zeros(12,3,10);
julia> ests = zeros(2,10);
julia> convs = fill(false, 10);
julia> for i = 1:10
temp = oneboot(model, start=[-0.4, 1.1])
samps[:,:,i] = temp.sample
ests[:,i] = temp.estimate
convs[i] = temp.convergence
end
julia> addboot!(fit, samps, ests, convs)
julia> summary(fit)
name est se 95% CI
parameter 1 -0.39 0.442 (-1.09, 0.263)
parameter 2 1.1 0.279 (-0.00664, 0.84)
```
"""
function addboot!(fit::ALfit,
                  bootsamples::Array{Float64,3},
                  bootestimates::Array{Float64,2},
                  convergence::Vector{Bool})
    # For single-observation models, store the samples as a 2D array.
    if size(bootsamples, 2) == 1
        bootsamples = dropdims(bootsamples, dims=2)
    end
    if fit.bootsamples !== nothing
        # Append new replicates to any existing bootstrap data.
        fit.bootsamples = cat(fit.bootsamples, bootsamples, dims=ndims(bootsamples))
        fit.bootestimates = [fit.bootestimates bootestimates]
        fit.convergence = [fit.convergence; convergence]
    else
        fit.bootsamples = bootsamples
        fit.bootestimates = bootestimates
        fit.convergence = convergence
    end
    # BUG FIX: statistics must be recomputed from ALL accumulated replicates. The
    # previous code computed `ix = findall(convergence)` (new batch only) and used it
    # to index the concatenated `fit.bootestimates`, and computed the CIs from the new
    # batch alone — both wrong whenever data was being appended.
    ix = findall(fit.convergence)
    nomitted = length(fit.convergence) - length(ix)
    if nomitted > 0
        println("NOTE: $(nomitted) entries have convergence==false.",
                " Omitting these in calculations.")
    end
    npar = size(fit.bootestimates, 1)
    fit.se = std(fit.bootestimates[:,ix], dims=2)[:]
    fit.CIs = [(quantile(fit.bootestimates[i,ix], 0.025),
                quantile(fit.bootestimates[i,ix], 0.975)) for i = 1:npar]
    return nothing
end
"""
addboot!(fit::ALfit, bootresults::Array{T,1}) where
T <: NamedTuple{(:sample, :estimate, :convergence)}
An `addboot!` method taking bootstrap data as an array of named tuples. Tuples are of the
form produced by `oneboot`.
# Examples
```jldoctest
julia> using Random;
julia> Random.seed!(1234);
julia> G = makegrid4(4,3).G;
julia> Y=[[fill(-1,4); fill(1,8)] [fill(-1,3); fill(1,9)] [fill(-1,5); fill(1,7)]];
julia> model = ALRsimple(G, ones(12,1,3), Y=Y);
julia> fit = fit_pl!(model, start=[-0.4, 1.1]);
julia> boots = [oneboot(model, start=[-0.4, 1.1]) for i = 1:10];
julia> addboot!(fit, boots)
julia> summary(fit)
name est se 95% CI
parameter 1 -0.39 0.442 (-1.09, 0.263)
parameter 2 1.1 0.279 (-0.00664, 0.84)
```
"""
function addboot!(fit::ALfit, bootresults::Array{T,1}) where
                  T <: NamedTuple{(:sample, :estimate, :convergence)}
    # Unpack the vector of named tuples into the 3 arrays the other method expects.
    nboot = length(bootresults)
    npar = length(bootresults[1].estimate)
    # Note: the sample may be 1D or 2D, so query each dimension separately.
    n = size(bootresults[1].sample, 1)
    m = size(bootresults[1].sample, 2)
    samps = Array{Float64}(undef, n, m, nboot)
    ests = Array{Float64}(undef, npar, nboot)
    convs = Vector{Bool}(undef, nboot)
    for (i, b) in enumerate(bootresults)
        samps[:, :, i] = b.sample
        ests[:, i] = b.estimate
        convs[i] = b.convergence
    end
    addboot!(fit, samps, ests, convs)
end
| Autologistic | https://github.com/kramsretlow/Autologistic.jl.git |
|
[
"MIT"
] | 0.5.1 | 15b96cd9e90eaa8d256266aed59fd13fd1c8f181 | code | 6615 | """
ALfull
An autologistic model with a `FullUnary` unary parameter type and a `FullPairwise`
pairwise parameter type. This model has the maximum number of unary parameters
(one parameter per variable per observation), and an association matrix with one
parameter per edge in the graph.
# Constructors
ALfull(unary::FullUnary, pairwise::FullPairwise;
Y::Union{Nothing,<:VecOrMat}=nothing,
centering::CenteringKinds=none,
coding::Tuple{Real,Real}=(-1,1),
labels::Tuple{String,String}=("low","high"),
coordinates::SpatialCoordinates=[(0.0,0.0) for i=1:size(unary,1)]
)
ALfull(graph::SimpleGraph{Int}, alpha::Float1D2D, lambda::Vector{Float64};
Y::VecOrMat=Array{Bool,2}(undef,nv(graph),size(alpha,2)),
centering::CenteringKinds=none,
coding::Tuple{Real,Real}=(-1,1),
labels::Tuple{String,String}=("low","high"),
coordinates::SpatialCoordinates=[(0.0,0.0) for i=1:nv(graph)]
)
ALfull(graph::SimpleGraph{Int}, count::Int=1;
Y::VecOrMat=Array{Bool,2}(undef,nv(graph),count),
centering::CenteringKinds=none,
coding::Tuple{Real,Real}=(-1,1),
labels::Tuple{String,String}=("low","high"),
coordinates::SpatialCoordinates=[(0.0,0.0) for i=1:nv(graph)]
)
# Arguments
- `Y`: the array of dichotomous responses. Any array with 2 unique values will work.
  If the array has only one unique value, it must equal one of the coding values. The
supplied object will be internally represented as a Boolean array.
- `centering`: controls what form of centering to use.
- `coding`: determines the numeric coding of the dichotomous responses.
- `labels`: a 2-tuple of text labels describing the meaning of `Y`. The first element
is the label corresponding to the lower coding value.
- `coordinates`: an array of 2- or 3-tuples giving spatial coordinates of each vertex in
the graph.
# Examples
```jldoctest
julia> g = Graph(10, 20); #-graph (20 edges)
julia> alpha = zeros(10, 4); #-unary parameter values
julia> lambda = rand(20); #-pairwise parameter values
julia> Y = rand([0, 1], 10, 4); #-responses
julia> u = FullUnary(alpha);
julia> p = FullPairwise(g, 4);
julia> setparameters!(p, lambda);
julia> model1 = ALfull(u, p, Y=Y);
julia> model2 = ALfull(g, alpha, lambda, Y=Y);
julia> model3 = ALfull(g, 4, Y=Y);
julia> setparameters!(model3, [alpha[:]; lambda]);
julia> all([getfield(model1, fn)==getfield(model2, fn)==getfield(model3, fn)
for fn in fieldnames(ALfull)])
true
```
"""
mutable struct ALfull{C<:CenteringKinds,
                      R<:Real,
                      S<:SpatialCoordinates} <: AbstractAutologisticModel
    responses::Array{Bool,2}
    unary::FullUnary
    pairwise::FullPairwise
    centering::C
    coding::Tuple{R,R}
    labels::Tuple{String,String}
    coordinates::S

    # Inner constructor: validate argument consistency before constructing.
    function ALfull(y, u, p, c::C, cod::Tuple{R,R}, lab, coords::S) where {C,R,S}
        size(y) == size(u) == size(p)[[1,3]] ||
            error("ALfull: inconsistent sizes of Y, unary, and pairwise")
        cod[1] >= cod[2] && error("ALfull: must have coding[1] < coding[2]")
        lab[1] == lab[2] && error("ALfull: labels must be different")
        return new{C,R,S}(y, u, p, c, cod, lab, coords)
    end
end
# === Constructors =============================================================
# Construct from pre-constructed unary and pairwise types.
# Build an ALfull from pre-constructed unary and pairwise objects. If `Y` is not
# supplied, the responses are left uninitialized; otherwise they are coerced to Bool.
# Fixed: `Y==nothing` replaced with the identity test `Y === nothing`.
function ALfull(unary::FullUnary, pairwise::FullPairwise;
                Y::Union{Nothing,<:VecOrMat}=nothing,
                centering::CenteringKinds=none,
                coding::Tuple{Real,Real}=(-1,1),
                labels::Tuple{String,String}=("low","high"),
                coordinates::SpatialCoordinates=[(0.0,0.0) for i=1:size(unary,1)])
    (n, m) = size(unary)
    if Y === nothing
        Y = Array{Bool,2}(undef, n, m)     # placeholder responses
    else
        Y = makebool(Y, coding)            # coerce user responses to Bool
    end
    return ALfull(Y, unary, pairwise, centering, coding, labels, coordinates)
end
# Construct from a graph, an array of unary parameters, and a vector of pairwise parameters.
function ALfull(graph::SimpleGraph{Int}, alpha::Float1D2D, lambda::Vector{Float64};
                Y::VecOrMat=Array{Bool,2}(undef,nv(graph),size(alpha,2)),
                centering::CenteringKinds=none,
                coding::Tuple{Real,Real}=(-1,1),
                labels::Tuple{String,String}=("low","high"),
                coordinates::SpatialCoordinates=[(0.0,0.0) for i=1:nv(graph)])
    # Wrap the raw parameter arrays in the appropriate unary/pairwise types.
    unary = FullUnary(alpha)
    pairwise = FullPairwise(graph, size(alpha,2))
    setparameters!(pairwise, lambda)
    responses = makebool(Y, coding)
    return ALfull(responses, unary, pairwise, centering, coding, labels, coordinates)
end
# Construct from a graph and a number of observations
function ALfull(graph::SimpleGraph{Int}, count::Int=1;
                Y::VecOrMat=Array{Bool,2}(undef,nv(graph),count),
                centering::CenteringKinds=none,
                coding::Tuple{Real,Real}=(-1,1),
                labels::Tuple{String,String}=("low","high"),
                coordinates::SpatialCoordinates=[(0.0,0.0) for i=1:nv(graph)])
    # Allocate default (all-zero parameter) unary and pairwise terms.
    unary = FullUnary(nv(graph), count)
    pairwise = FullPairwise(graph, count)
    return ALfull(makebool(Y, coding), unary, pairwise, centering, coding, labels, coordinates)
end
# ==============================================================================
# === show methods =============================================================
# Multi-line REPL display for an ALfull model.
function show(io::IO, ::MIME"text/plain", m::ALfull)
    header = "Autologistic model of type ALfull with parameter vector [α; Λ].\n"
    print(io, header, "Fields:\n", showfields(m, 2))
end
# Describe each field of the model, one per line, indented by `leadspaces` spaces.
# Nested unary/pairwise descriptions are indented a further 15 spaces to line up
# with the field-name column.
function showfields(m::ALfull, leadspaces=0)
    spc = repeat(" ", leadspaces)
    return spc * "responses      $(size2string(m.responses)) Bool array\n" *
           spc * "unary          $(size2string(m.unary)) FullUnary with fields:\n" *
           showfields(m.unary, leadspaces+15) *
           spc * "pairwise       $(size2string(m.pairwise)) FullPairwise with fields:\n" *
           showfields(m.pairwise, leadspaces+15) *
           spc * "centering      $(m.centering)\n" *
           spc * "coding         $(m.coding)\n" *
           spc * "labels         $(m.labels)\n" *
           spc * "coordinates    $(size2string(m.coordinates)) vector of $(eltype(m.coordinates))\n"
end
# ==============================================================================
| Autologistic | https://github.com/kramsretlow/Autologistic.jl.git |
|
[
"MIT"
] | 0.5.1 | 15b96cd9e90eaa8d256266aed59fd13fd1c8f181 | code | 6588 | """
ALsimple
An autologistic model with a `FullUnary` unary parameter type and a `SimplePairwise`
pairwise parameter type. This model has the maximum number of unary parameters
(one parameter per variable per observation), and a single association parameter.
# Constructors
ALsimple(unary::FullUnary, pairwise::SimplePairwise;
Y::Union{Nothing,<:VecOrMat} = nothing,
centering::CenteringKinds = none,
coding::Tuple{Real,Real} = (-1,1),
labels::Tuple{String,String} = ("low","high"),
coordinates::SpatialCoordinates = [(0.0,0.0) for i=1:size(unary,1)]
)
ALsimple(graph::SimpleGraph{Int}, alpha::Float1D2D;
Y::VecOrMat = Array{Bool,2}(undef,nv(graph),size(alpha,2)),
λ::Float64 = 0.0,
centering::CenteringKinds = none,
coding::Tuple{Real,Real} = (-1,1),
labels::Tuple{String,String} = ("low","high"),
coordinates::SpatialCoordinates = [(0.0,0.0) for i=1:nv(graph)]
)
ALsimple(graph::SimpleGraph{Int}, count::Int = 1;
Y::VecOrMat = Array{Bool,2}(undef,nv(graph),size(alpha,2)),
λ::Float64=0.0,
centering::CenteringKinds=none,
coding::Tuple{Real,Real}=(-1,1),
labels::Tuple{String,String}=("low","high"),
coordinates::SpatialCoordinates=[(0.0,0.0) for i=1:nv(graph)]
)
# Arguments
- `Y`: the array of dichotomous responses. Any array with 2 unique values will work.
  If the array has only one unique value, it must equal one of the coding values. The
supplied object will be internally represented as a Boolean array.
- `λ`: the association parameter.
- `centering`: controls what form of centering to use.
- `coding`: determines the numeric coding of the dichotomous responses.
- `labels`: a 2-tuple of text labels describing the meaning of `Y`. The first element
is the label corresponding to the lower coding value.
- `coordinates`: an array of 2- or 3-tuples giving spatial coordinates of each vertex in
the graph.
# Examples
```jldoctest
julia> alpha = zeros(10, 4); #-unary parameter values
julia> Y = rand([0, 1], 10, 4); #-responses
julia> g = Graph(10, 20); #-graph
julia> u = FullUnary(alpha);
julia> p = SimplePairwise(g, 4);
julia> model1 = ALsimple(u, p, Y=Y);
julia> model2 = ALsimple(g, alpha, Y=Y);
julia> model3 = ALsimple(g, 4, Y=Y);
julia> all([getfield(model1, fn)==getfield(model2, fn)==getfield(model3, fn)
for fn in fieldnames(ALsimple)])
true
```
"""
mutable struct ALsimple{C<:CenteringKinds,
                        R<:Real,
                        S<:SpatialCoordinates} <: AbstractAutologisticModel
    responses::Array{Bool,2}
    unary::FullUnary
    pairwise::SimplePairwise
    centering::C
    coding::Tuple{R,R}
    labels::Tuple{String,String}
    coordinates::S

    # Inner constructor: validate argument consistency before constructing.
    function ALsimple(y, u, p, c::C, cod::Tuple{R,R}, lab, coords::S) where {C,R,S}
        size(y) == size(u) == size(p)[[1,3]] ||
            error("ALsimple: inconsistent sizes of Y, unary, and pairwise")
        cod[1] >= cod[2] && error("ALsimple: must have coding[1] < coding[2]")
        lab[1] == lab[2] && error("ALsimple: labels must be different")
        return new{C,R,S}(y, u, p, c, cod, lab, coords)
    end
end
# === Constructors =============================================================
# Construct from pre-constructed unary and pairwise types.
# Build an ALsimple from pre-constructed unary and pairwise objects. If `Y` is not
# supplied, the responses are left uninitialized; otherwise they are coerced to Bool.
# Fixed: `Y==nothing` replaced with the identity test `Y === nothing`.
function ALsimple(unary::FullUnary, pairwise::SimplePairwise;
                  Y::Union{Nothing,<:VecOrMat}=nothing,
                  centering::CenteringKinds=none,
                  coding::Tuple{Real,Real}=(-1,1),
                  labels::Tuple{String,String}=("low","high"),
                  coordinates::SpatialCoordinates=[(0.0,0.0) for i=1:size(unary,1)])
    (n, m) = size(unary)
    if Y === nothing
        Y = Array{Bool,2}(undef, n, m)     # placeholder responses
    else
        Y = makebool(Y, coding)            # coerce user responses to Bool
    end
    return ALsimple(Y, unary, pairwise, centering, coding, labels, coordinates)
end
# Construct from a graph and an array of unary parameters.
function ALsimple(graph::SimpleGraph{Int}, alpha::Float1D2D;
                  Y::VecOrMat=Array{Bool,2}(undef,nv(graph),size(alpha,2)),
                  λ::Float64=0.0,
                  centering::CenteringKinds=none,
                  coding::Tuple{Real,Real}=(-1,1),
                  labels::Tuple{String,String}=("low","high"),
                  coordinates::SpatialCoordinates=[(0.0,0.0) for i=1:nv(graph)])
    # Wrap the unary parameter array and the association parameter λ.
    unary = FullUnary(alpha)
    pairwise = SimplePairwise(λ, graph, size(alpha,2))
    responses = makebool(Y, coding)
    return ALsimple(responses, unary, pairwise, centering, coding, labels, coordinates)
end
# Construct from a graph and a number of observations
function ALsimple(graph::SimpleGraph{Int}, count::Int=1;
                  Y::VecOrMat=Array{Bool,2}(undef,nv(graph),count),
                  λ::Float64=0.0,
                  centering::CenteringKinds=none,
                  coding::Tuple{Real,Real}=(-1,1),
                  labels::Tuple{String,String}=("low","high"),
                  coordinates::SpatialCoordinates=[(0.0,0.0) for i=1:nv(graph)])
    # Allocate a default (all-zero) unary term and the single association parameter.
    unary = FullUnary(nv(graph), count)
    pairwise = SimplePairwise(λ, graph, count)
    return ALsimple(makebool(Y, coding), unary, pairwise, centering, coding, labels, coordinates)
end
# ==============================================================================
# === show methods =============================================================
# Multi-line REPL display for an ALsimple model.
function show(io::IO, ::MIME"text/plain", m::ALsimple)
    header = "Autologistic model of type ALsimple with parameter vector [α; λ].\n"
    print(io, header, "Fields:\n", showfields(m, 2))
end
# Describe each field of the model, one per line, indented by `leadspaces` spaces.
# Nested unary/pairwise descriptions are indented a further 15 spaces to line up
# with the field-name column.
function showfields(m::ALsimple, leadspaces=0)
    spc = repeat(" ", leadspaces)
    return spc * "responses      $(size2string(m.responses)) Bool array\n" *
           spc * "unary          $(size2string(m.unary)) FullUnary with fields:\n" *
           showfields(m.unary, leadspaces+15) *
           spc * "pairwise       $(size2string(m.pairwise)) SimplePairwise with fields:\n" *
           showfields(m.pairwise, leadspaces+15) *
           spc * "centering      $(m.centering)\n" *
           spc * "coding         $(m.coding)\n" *
           spc * "labels         $(m.labels)\n" *
           spc * "coordinates    $(size2string(m.coordinates)) vector of $(eltype(m.coordinates))\n"
end
# ==============================================================================
| Autologistic | https://github.com/kramsretlow/Autologistic.jl.git |
|
[
"MIT"
] | 0.5.1 | 15b96cd9e90eaa8d256266aed59fd13fd1c8f181 | code | 1898 | module Autologistic
using Graphs: Graph, SimpleGraph, nv, ne, adjacency_matrix, edges, add_edge!
using LinearAlgebra: norm, diag, triu, I
using SparseArrays: SparseMatrixCSC, sparse, spzeros
using CSV: read
using Optim: optimize, Options, converged, BFGS
using Distributions: Normal, cdf
using SharedArrays: SharedArray
using Random: seed!, rand, randn
using Distributed: @distributed, workers
using Statistics: mean, std, quantile
using DataFrames
import Base: show, getindex, setindex!, summary, size, IndexStyle, length
import Distributions: sample
export
#----- types -----
AbstractAutologisticModel,
AbstractPairwiseParameter,
AbstractUnaryParameter,
ALfit,
ALfull,
ALRsimple,
ALsimple,
FullPairwise,
FullUnary,
SpatialCoordinates,
LinPredUnary,
SimplePairwise,
#----- enums -----
CenteringKinds, none, expectation, onehalf,
SamplingMethods, Gibbs, perfect_reuse_samples, perfect_reuse_seeds, perfect_read_once, perfect_bounding_chain,
#----- functions -----
addboot!,
centeringterms,
conditionalprobabilities,
fit_ml!,
fit_pl!,
fullPMF,
getparameters,
getpairwiseparameters,
getunaryparameters,
loglikelihood,
makegrid4,
makegrid8,
makebool,
makecoded,
makespatialgraph,
marginalprobabilities,
negpotential,
oneboot,
pseudolikelihood,
sample,
setparameters!,
setunaryparameters!,
setpairwiseparameters!
include("common.jl")
include("ALfit_type.jl")
include("abstractautologisticmodel_type.jl")
include("abstractunaryparameter_type.jl")
include("abstractpairwiseparameter_type.jl")
include("fullpairwise_type.jl")
include("fullunary_type.jl")
include("linpredunary_type.jl")
include("simplepairwise_type.jl")
include("ALsimple_type.jl")
include("ALfull_type.jl")
include("ALRsimple_type.jl")
include("samplers.jl")
end # module
| Autologistic | https://github.com/kramsretlow/Autologistic.jl.git |
|
[
"MIT"
] | 0.5.1 | 15b96cd9e90eaa8d256266aed59fd13fd1c8f181 | code | 31131 | """
AbstractAutologisticModel
Abstract type representing autologistic models. All concrete subtypes should have the
following fields:
* `responses::Array{Bool,2}` -- The binary observations. Rows are for nodes in the
graph, and columns are for independent (vector) observations. It is a 2D array even if
there is only one observation.
* `unary<:AbstractUnaryParameter` -- Specifies the unary part of the model.
* `pairwise<:AbstractPairwiseParameter` -- Specifies the pairwise part of the model
(including the graph).
* `centering<:CenteringKinds` -- Specifies the form of centering used, if any.
* `coding::Tuple{T,T} where T<:Real` -- Gives the numeric coding of the responses.
* `labels::Tuple{String,String}` -- Provides names for the high and low states.
* `coordinates<:SpatialCoordinates` -- Provides 2D or 3D coordinates for each vertex in
the graph.
This type has the following functions defined, considered part of the type's interface.
They cover most operations one will want to perform. Concrete subtypes should not have to
define custom overrides unless more specialized or efficient algorithms exist for the
subtype.
* `getparameters` and `setparameters!`
* `getunaryparameters` and `setunaryparameters!`
* `getpairwiseparameters` and `setpairwiseparameters!`
* `centeringterms`
* `negpotential`, `pseudolikelihood`, and `loglikelihood`
* `fullPMF`, `marginalprobabilities`, and `conditionalprobabilities`
* `fit_pl!` and `fit_ml!`
* `sample` and `oneboot`
* `showfields`
# Examples
```jldoctest
julia> M = ALsimple(Graph(4,4));
julia> typeof(M)
ALsimple{CenteringKinds,Int64,Nothing}
julia> isa(M, AbstractAutologisticModel)
true
```
"""
# Root of the autologistic model type hierarchy. The docstring above lists the
# fields and interface functions concrete subtypes are expected to provide.
abstract type AbstractAutologisticModel end
# === Getting/setting parameters ===============================================
"""
getparameters(x)
A generic function for extracting the parameters from an autologistic model, a unary term,
or a pairwise term. Parameters are always returned as an `Array{Float64,1}`. If
`typeof(x) <: AbstractAutologisticModel`, the returned vector is partitioned with the unary
parameters first.
"""
getparameters(M::AbstractAutologisticModel) =
    vcat(getparameters(M.unary), getparameters(M.pairwise))
"""
getunaryparameters(M::AbstractAutologisticModel)
Extracts the unary parameters from an autologistic model. Parameters are always returned as
an `Array{Float64,1}`.
"""
function getunaryparameters(M::AbstractAutologisticModel)
    return getparameters(M.unary)
end
"""
getpairwiseparameters(M::AbstractAutologisticModel)
Extracts the pairwise parameters from an autologistic model. Parameters are always returned as
an `Array{Float64,1}`.
"""
function getpairwiseparameters(M::AbstractAutologisticModel)
    return getparameters(M.pairwise)
end
"""
setparameters!(x, newpars::Vector{Float64})
A generic function for setting the parameter values of an autologistic model, a unary term,
or a pairwise term. Parameters are always passed as an `Array{Float64,1}`. If
`typeof(x) <: AbstractAutologisticModel`, the `newpars` is assumed partitioned with the
unary parameters first.
"""
function setparameters!(M::AbstractAutologisticModel, newpars::Vector{Float64})
    p, q = (length(getunaryparameters(M)), length(getpairwiseparameters(M)))
    # Validate input with a real exception: @assert is for internal invariants only
    # and may be disabled at higher optimization levels.
    length(newpars) == p + q ||
        throw(ArgumentError("newpars has wrong length (expected $(p+q), got $(length(newpars)))"))
    setparameters!(M.unary, newpars[1:p])
    setparameters!(M.pairwise, newpars[p+1:p+q])
    return newpars
end
"""
setunaryparameters!(M::AbstractAutologisticModel, newpars::Vector{Float64})
Sets the unary parameters of autologistic model `M` to the values in `newpars`.
"""
setunaryparameters!(M::AbstractAutologisticModel, newpars::Vector{Float64}) =
    setparameters!(M.unary, newpars)
"""
setpairwiseparameters!(M::AbstractAutologisticModel, newpars::Vector{Float64})
Sets the pairwise parameters of autologistic model `M` to the values in `newpars`.
"""
setpairwiseparameters!(M::AbstractAutologisticModel, newpars::Vector{Float64}) =
    setparameters!(M.pairwise, newpars)
# ==============================================================================
# === Show Methods==============================================================
# Compact display: just the concrete type name.
show(io::IO, m::AbstractAutologisticModel) = print(io, string(typeof(m)))

# Multi-line REPL display: type, dimensions, and field summary.
function show(io::IO, ::MIME"text/plain", m::AbstractAutologisticModel)
    nvert = size(m.unary, 1)
    nobs = size(m.unary, 2)
    obsword = nobs == 1 ? "observation" : "observations"
    print(io, "Autologistic model of type $(typeof(m)), \n",
          "with $(nvert) vertices, $(nobs) $(obsword) and fields:\n",
          showfields(m, 2))
end

# Fallback for concrete subtypes that have not defined showfields.
function showfields(m::AbstractAutologisticModel, leadspaces=0)
    return repeat(" ", leadspaces) *
           "(**Autologistic.showfields not implemented for $(typeof(m))**)\n"
end
# ==============================================================================
"""
centeringterms(M::AbstractAutologisticModel, kind::Union{Nothing,CenteringKinds}=nothing)
Returns an `Array{Float64,2}` of the same dimension as `M.unary`, giving the centering
adjustments for autologistic model `M`. `centeringterms(M,kind)` returns the centering
adjustment that would be used if centering were of type `kind`.
# Examples
```jldoctest
julia> G = makegrid8(2,2).G;
julia> X = [ones(4) [-2; -1; 1; 2]];
julia> M1 = ALRsimple(G, X, β=[-1.0, 2.0]); #-No centering (default)
julia> M2 = ALRsimple(G, X, β=[-1.0, 2.0], centering=expectation); #-Centered model
julia> [centeringterms(M1) centeringterms(M2) centeringterms(M1, onehalf)]
4×3 Array{Float64,2}:
0.0 -0.999909 0.5
0.0 -0.995055 0.5
0.0 0.761594 0.5
0.0 0.995055 0.5
```
"""
# Compute the centering adjustments for model M (same shape as M.unary). Fixed:
# the `kind==nothing` anti-pattern is replaced with the identity test `===`.
function centeringterms(M::AbstractAutologisticModel, kind::Union{Nothing,CenteringKinds}=nothing)
    # Use the model's own centering kind unless an override is supplied.
    k = kind === nothing ? M.centering : kind
    if k == none
        return fill(0.0, size(M.unary))
    elseif k == onehalf
        return fill(0.5, size(M.unary))
    elseif k == expectation
        # Independence expectation with coding (lo, hi): weighted average of the
        # two coding values with weights exp(lo*α) and exp(hi*α).
        lo, hi = M.coding
        α = M.unary[:,:]
        num = lo*exp.(lo*α) + hi*exp.(hi*α)
        denom = exp.(lo*α) + exp.(hi*α)
        return num./denom
    else
        error("centering kind not recognized")
    end
end
"""
pseudolikelihood(M::AbstractAutologisticModel)
Computes the negative log pseudolikelihood for autologistic model `M`. Returns a `Float64`.
# Examples
```jldoctest
julia> X = [1.1 2.2
1.0 2.0
2.1 1.2
3.0 0.3];
julia> Y = [0; 0; 1; 0];
julia> M3 = ALRsimple(makegrid4(2,2)[1], cat(X,X,dims=3), Y=cat(Y,Y,dims=2),
β=[-0.5, 1.5], λ=1.25, centering=expectation);
julia> pseudolikelihood(M3)
12.333549445795818
```
"""
# Accumulate the negative log pseudolikelihood over all observations (columns of Y).
function pseudolikelihood(M::AbstractAutologisticModel)
    out = 0.0
    Y = makecoded(M)
    mu = centeringterms(M)
    lo, hi = M.coding
    # Loop through observations
    for j = 1:size(Y)[2]
        y = Y[:,j];                     #-Current observation's values.
        α = M.unary[:,j]                #-Current observation's unary parameters.
        μ = mu[:,j]                     #-Current observation's centering terms.
        Λ = M.pairwise[:,:,j]           #-Current observation's assoc. matrix.
        s = α + Λ*(y - μ)               #-(λ-weighted) neighbour sums + unary.
        # log PL = Σ_i [ y_i s_i - log(exp(lo*s_i) + exp(hi*s_i)) ].
        # NOTE(review): exp(lo*s)/exp(hi*s) could overflow for very large |s|;
        # a log-sum-exp formulation would be more robust — confirm if needed.
        logPL = sum(y.*s - log.(exp.(lo*s) + exp.(hi*s)))
        out = out - logPL               #-Subtract this rep's log PL from total.
    end
    return out
end
# Mutating objective wrapper used by the optimizer: install the candidate
# parameters in M, then evaluate the negative log pseudolikelihood.
function pslik!(par::Vector{Float64}, M::AbstractAutologisticModel)
    setparameters!(M, par)
    return pseudolikelihood(M)
end
"""
oneboot(M::AbstractAutologisticModel;
start=zeros(length(getparameters(M))),
verbose::Bool=false,
kwargs...
)
Performs one parametric bootstrap replication from autologistic model `M`: draw a
random sample from `M`, use that sample as the responses, and re-fit the model. Returns
a named tuple `(:sample, :estimate, :convergence)`, where `:sample` holds the random
sample, `:estimate` holds the parameter estimates, and `:convergence` holds a `bool`
indicating whether or not the optimization converged. The parameters of `M` remain
unchanged by calling `oneboot`.
# Arguments
- `start`: starting parameter values to use for optimization
- `verbose`: should progress information be written to the console?
- `kwargs...`: extra keyword arguments that are passed to `optimize()` or `sample()`, as
appropriate.
# Examples
```jldoctest
julia> G = makegrid4(4,3).G;
julia> model = ALRsimple(G, ones(12,1), Y=[fill(-1,4); fill(1,8)]);
julia> theboot = oneboot(model, method=Gibbs, burnin=250);
julia> fieldnames(typeof(theboot))
(:sample, :estimate, :convergence)```
"""
function oneboot(M::AbstractAutologisticModel;
                 start=zeros(length(getparameters(M))),
                 verbose::Bool=false,
                 kwargs...)
    # Save current state so M can be restored on exit.
    savedY = M.responses
    savedpars = getparameters(M)
    # Only the sampling-related keywords are forwarded to sample().
    _, sampkw = splitkw(kwargs)
    yboot = sample(M; verbose=verbose, sampkw...)
    # Temporarily install the simulated responses and re-fit.
    M.responses = makebool(yboot, M.coding)
    refit = fit_pl!(M; start=start, verbose=verbose, kwargs...)
    M.responses = savedY
    setparameters!(M, savedpars)
    return (sample=yboot, estimate=refit.estimate, convergence=refit.convergence)
end
"""
oneboot(M::AbstractAutologisticModel, params::Vector{Float64};
start=zeros(length(getparameters(M))),
verbose::Bool=false,
kwargs...
)
Computes one bootstrap replicate using model `M`, but using parameters `params` for
generating samples, instead of `getparameters(M)`.
"""
function oneboot(M::AbstractAutologisticModel, params::Vector{Float64};
                 start=zeros(length(getparameters(M))),
                 verbose::Bool=false,
                 kwargs...)
    oldpar = getparameters(M)
    # BUG FIX: install `params` before delegating, as documented ("using parameters
    # params for generating samples"). Previously `params` was never used, so this
    # method behaved identically to the one-argument form.
    setparameters!(M, params)
    out = oneboot(M; start=start, verbose=verbose, kwargs...)
    setparameters!(M, oldpar)   # restore the original parameters
    return out
end
"""
fit_pl!(M::AbstractAutologisticModel;
start=zeros(length(getparameters(M))),
verbose::Bool=false,
nboot::Int = 0,
kwargs...)
Fit autologistic model `M` using maximum pseudolikelihood.
# Arguments
- `start`: initial value to use for optimization.
- `verbose`: should progress information be printed to the console?
- `nboot`: number of samples to use for parametric bootstrap error estimation.
If `nboot=0` (the default), no bootstrap is run.
- `kwargs...` extra keyword arguments that are passed on to `optimize()` or `sample()`,
as appropriate.
# Examples
```jldoctest
julia> Y=[[fill(-1,4); fill(1,8)] [fill(-1,3); fill(1,9)] [fill(-1,5); fill(1,7)]];
julia> model = ALRsimple(makegrid4(4,3).G, ones(12,1,3), Y=Y);
julia> fit = fit_pl!(model, start=[-0.4, 1.1]);
julia> summary(fit)
name est se p-value 95% CI
parameter 1 -0.39
parameter 2 1.1
```
"""
function fit_pl!(M::AbstractAutologisticModel;
                 start=zeros(length(getparameters(M))),
                 verbose::Bool=false,
                 nboot::Int = 0,
                 kwargs...)
    # Save the incoming parameters so they can be restored if optimization fails.
    originalparameters = getparameters(M)
    npar = length(originalparameters)
    ret = ALfit()
    ret.kwargs = kwargs
    # Split caller-supplied keywords into those meant for optimize() vs. sample().
    optimargs, sampleargs = splitkw(kwargs)
    opts = Options(; optimargs...)
    if verbose
        println("-- Finding the maximum pseudolikelihood estimate --")
        println("Calling Optim.optimize with BFGS method...")
    end
    # Capture any exception thrown by the optimizer instead of propagating it,
    # so a failed fit can be reported gracefully below.
    out = try
        optimize(θ -> pslik!(θ,M), start, BFGS(), opts)
    catch err
        err
    end
    ret.optim = out
    if typeof(out)<:Exception || !converged(out)
        # Failure: restore M's original parameters and flag non-convergence.
        setparameters!(M, originalparameters)
        @warn "Optim.optimize did not succeed. Model parameters have not been changed."
        ret.convergence = false
        return ret
    else
        # Success: leave M at the optimum and record the estimate.
        setparameters!(M, out.minimizer)
        ret.estimate = out.minimizer
        ret.convergence = true
    end
    if verbose
        println("-- Parametric bootstrap variance estimation --")
        if nboot == 0
            println("nboot==0. Skipping the bootstrap. Returning point estimates only.")
        end
    end
    if nboot > 0
        if length(workers()) > 1
            # Parallel path: one bootstrap replicate per distributed task, with
            # results collected in SharedArrays.
            if verbose
                println("Attempting parallel bootstrap with $(length(workers())) workers.")
            end
            n, m = size(M.responses)
            bootsamples = SharedArray{Float64}(n, m, nboot)
            bootestimates = SharedArray{Float64}(npar, nboot)
            convresults = SharedArray{Bool}(nboot)
            @sync @distributed for i = 1:nboot
                bootout = oneboot(M; start=start, kwargs...)
                bootsamples[:,:,i] = bootout.sample
                bootestimates[:,i] = bootout.estimate
                convresults[i] = bootout.convergence
            end
            addboot!(ret, Array(bootsamples), Array(bootestimates), Array(convresults))
        else
            # Serial path: run replicates one at a time, with a simple progress bar.
            if verbose print("bootstrap iteration 1") end
            bootresults = Array{NamedTuple{(:sample, :estimate, :convergence)}}(undef,nboot)
            for i = 1:nboot
                bootresults[i] = oneboot(M; start=start, kwargs...)
                if verbose print(mod(i,10)==0 ? i : "|") end
            end
            if verbose print("\n") end
            addboot!(ret, bootresults)
        end
    end
    return ret
end
# TODO: consider if the main line of linear algebra can be sped up.
"""
    negpotential(M::AbstractAutologisticModel)

Returns an m-vector of `Float64` negpotential values, where m is the number of observations
found in `M.responses`.

# Examples
```jldoctest
julia> M = ALsimple(makegrid4(3,3).G, ones(9));
julia> f = fullPMF(M);
julia> exp(negpotential(M)[1])/f.partition ≈ exp(loglikelihood(M))
true
```
"""
function negpotential(M::AbstractAutologisticModel)
    Y = makecoded(M)
    nobs = size(Y, 2)
    α = M.unary[:, :]
    μ = centeringterms(M)
    vals = Array{Float64}(undef, nobs)
    for r = 1:nobs
        y = Y[:, r]                 # coded responses for observation r
        Λ = M.pairwise[:, :, r]     # association matrix for observation r
        # negpotential = y'α − y'Λμ + y'Λy/2
        vals[r] = y' * α[:, r] - y' * Λ * μ[:, r] + y' * Λ * y / 2
    end
    return vals
end
"""
fullPMF(M::AbstractAutologisticModel;
indices=1:size(M.unary,2),
force::Bool=false
)
Compute the PMF of an AbstractAutologisticModel, and return a `NamedTuple` `(:table, :partition)`.
For an AbstractAutologisticModel with ``n`` variables and ``m`` observations, `:table` is a
``2^n×(n+1)×m`` array of `Float64`. Each page of the 3D array holds a probability table for
an observation. Each row of the table holds a specific configuration of the responses, with
the corresponding probability in the last column. In the ``m=1`` case, `:table` is a 2D
array.
Output `:partition` is a vector of normalizing constant (a.k.a. partition function) values.
In the ``m=1`` case, it is a scalar `Float64`.
# Arguments
- `indices`: indices of specific observations from which to obtain the output. By
default, all observations are used.
- `force`: calling the function with ``n>20`` will throw an error unless
`force=true`.
# Examples
```jldoctest
julia> M = ALRsimple(Graph(3,0),ones(3,1));
julia> pmf = fullPMF(M);
julia> pmf.table
8×4 Array{Float64,2}:
-1.0 -1.0 -1.0 0.125
-1.0 -1.0 1.0 0.125
-1.0 1.0 -1.0 0.125
-1.0 1.0 1.0 0.125
1.0 -1.0 -1.0 0.125
1.0 -1.0 1.0 0.125
1.0 1.0 -1.0 0.125
1.0 1.0 1.0 0.125
julia> pmf.partition
8.0
```
"""
function fullPMF(M::AbstractAutologisticModel; indices=1:size(M.unary,2),
force::Bool=false)
n, m = size(M.unary)
nc = 2^n
if n>20 && !force
error("Attempting to tabulate a PMF with more than 2^20 configurations."
* "\nIf you really want to do this, set force=true.")
end
if minimum(indices)<1 || maximum(indices)>m
error("observation index out of bounds")
end
lo = M.coding[1]
hi = M.coding[2]
T = zeros(nc, n+1, length(indices))
configs = zeros(nc,n)
partition = zeros(m)
for i in 1:n
inner = [repeat([lo],Int(nc/2^i)); repeat([hi],Int(nc/2^i))]
configs[:,i] = repeat(inner , 2^(i-1) )
end
for i in 1:length(indices)
r = indices[i]
T[:,1:n,i] = configs
α = M.unary[:,r]
Λ = M.pairwise[:,:,r]
μ = centeringterms(M)[:,r]
unnormalized = mapslices(v -> exp.(v'*α - v'*Λ*μ + v'*Λ*v/2), configs, dims=2)
partition[i] = sum(unnormalized)
T[:,n+1,i] = unnormalized / partition[i]
end
if length(indices)==1
T = dropdims(T,dims=3)
partition = partition[1]
end
return (table=T, partition=partition)
end
"""
loglikelihood(M::AbstractAutologisticModel;
force::Bool=false
)
Compute the natural logarithm of the likelihood for autologistic model `M`. This will throw
an error for models with more than 20 vertices, unless `force=true`.
# Examples
```jldoctest
julia> model = ALRsimple(makegrid4(2,2)[1], ones(4,2,3), centering = expectation,
coding = (0,1), Y = repeat([true, true, false, false],1,3));
julia> setparameters!(model, [1.0, 1.0, 1.0]);
julia> loglikelihood(model)
-11.86986109487605
```
"""
function loglikelihood(M::AbstractAutologisticModel; force::Bool=false)
parts = fullPMF(M, force=force).partition
return sum(negpotential(M) .- log.(parts))
end
function negloglik!(θ::Vector{Float64}, M::AbstractAutologisticModel; force::Bool=false)
setparameters!(M,θ)
return -loglikelihood(M, force=force)
end
"""
fit_ml!(M::AbstractAutologisticModel;
start=zeros(length(getparameters(M))),
verbose::Bool=false,
force::Bool=false,
kwargs...
)
Fit autologistic model `M` using maximum likelihood. Will fail for models with more than
20 vertices, unless `force=true`. Use `fit_pl!` for larger models.
# Arguments
- `start`: initial value to use for optimization.
- `verbose`: should progress information be printed to the console?
- `force`: set to `true` to force computation of the likelihood for large models.
- `kwargs...` extra keyword arguments that are passed on to `optimize()`.
# Examples
```jldoctest
julia> G = makegrid4(4,3).G;
julia> model = ALRsimple(G, ones(12,1), Y=[fill(-1,4); fill(1,8)]);
julia> mle = fit_ml!(model);
julia> summary(mle)
name est se p-value 95% CI
parameter 1 0.0791 0.163 0.628 (-0.241, 0.399)
parameter 2 0.425 0.218 0.0511 (-0.00208, 0.852)
```
"""
function fit_ml!(M::AbstractAutologisticModel;
start=zeros(length(getparameters(M))),
verbose::Bool=false,
force::Bool=false,
kwargs...)
originalparameters = getparameters(M)
npar = length(originalparameters)
ret = ALfit()
ret.kwargs = kwargs
opts = Options(; kwargs...)
if verbose
println("Calling Optim.optimize with BFGS method...")
end
out = try
optimize(θ -> negloglik!(θ,M,force=force), start, BFGS(), opts)
catch err
err
end
ret.optim = out
if typeof(out)<:Exception || !converged(out)
setparameters!(M, originalparameters)
@warn "Optim.optimize did not succeed. Model parameters have not been changed."
ret.convergence = false
return ret
end
if verbose
println("Approximating the Hessian at the MLE...")
end
H = hess(θ -> negloglik!(θ,M), out.minimizer)
if verbose
println("Getting standard errors...")
end
Hinv = inv(H)
SE = sqrt.(diag(Hinv))
pvals = zeros(npar)
CIs = [(0.0, 0.0) for i=1:npar]
for i = 1:npar
N = Normal(0,SE[i])
pvals[i] = 2*(1 - cdf(N, abs(out.minimizer[i])))
CIs[i] = out.minimizer[i] .+ (quantile(N,0.025), quantile(N,0.975))
end
setparameters!(M, out.minimizer)
ret.estimate = out.minimizer
ret.se = SE
ret.pvalues = pvals
ret.CIs = CIs
ret.Hinv = Hinv
ret.convergence = true
if verbose
println("...completed successfully.")
end
return ret
end
"""
marginalprobabilities(M::AbstractAutologisticModel;
indices=1:size(M.unary,2),
force::Bool=false
)
Compute the marginal probability that variables in autologistic model `M` takes the high
state. For a model with n vertices and m observations, returns an n-by-m array
(or an n-vector if m==1). The [i,j]th element is the marginal probability of the high state
in the ith variable at the jth observation.
This function computes the exact marginals. For large models, approximate the marginal
probabilities by sampling, e.g. `sample(M, ..., average=true)`.
# Arguments
- `indices`: used to return only the probabilities for certain observations.
- `force`: the function will throw an error for n > 20 unless `force=true`.
# Examples
```jldoctest
julia> M = ALsimple(Graph(3,0), [[-1.0; 0.0; 1.0] [-1.0; 0.0; 1.0]])
julia> marginalprobabilities(M)
3×2 Array{Float64,2}:
0.119203 0.119203
0.5 0.5
0.880797 0.880797
```
"""
function marginalprobabilities(M::AbstractAutologisticModel; indices=1:size(M.unary,2),
force::Bool=false)
n, m = size(M.unary)
nc = 2^n
if n>20 && !force
error("Attempting to tabulate a PMF with more than 2^20 configurations."
* "\nIf you really want to do this, set force=true.")
end
if minimum(indices)<1 || maximum(indices)>m
error("observation index out of bounds")
end
hi = M.coding[2]
out = zeros(n,length(indices))
tbl = fullPMF(M).table
for j = 1:length(indices)
r = indices[j]
for i = 1:n
out[i,j] = sum(mapslices(x -> x[i]==hi ? x[n+1] : 0.0, tbl[:,:,r], dims=2))
end
end
if length(indices) == 1
return vec(out)
end
return out
end
# TODO: look for speed gains
"""
    conditionalprobabilities(M::AbstractAutologisticModel; vertices=1:size(M.unary,1),
                             indices=1:size(M.unary,2))

Compute the conditional probability that variables in autologistic model `M` take the high
state, given the current values of all of their neighbors. If `vertices` or `indices` are
provided, the results are only computed for the desired variables & observations.
Otherwise results are computed for all variables and observations.

# Examples
```jldoctest
julia> Y = [ones(9) zeros(9)];
julia> G = makegrid4(3,3).G;
julia> model = ALsimple(G, ones(9,2), Y=Y, λ=0.5);    #-Variables on a 3×3 grid, 2 obs.
julia> conditionalprobabilities(model, vertices=5)    #-Cond. probs. of center vertex.
1×2 Array{Float64,2}:
 0.997527  0.119203
julia> conditionalprobabilities(model, indices=2)     #-Cond probs, 2nd observation.
9×1 Array{Float64,2}:
 0.5
 0.26894142136999516
 0.5
 0.26894142136999516
 0.11920292202211756
 0.26894142136999516
 0.5
 0.26894142136999516
 0.5
```
"""
function conditionalprobabilities(M::AbstractAutologisticModel; vertices=1:size(M.unary,1),
                                  indices=1:size(M.unary,2))
    n, m = size(M.unary)
    if minimum(vertices)<1 || maximum(vertices)>n
        error("vertices index out of bounds")
    end
    if minimum(indices)<1 || maximum(indices)>m
        error("observation index out of bounds")
    end
    out = zeros(Float64, length(vertices), length(indices))
    Y = makecoded(M)
    μ = centeringterms(M)
    lo, hi = M.coding
    # Adjacency lists of the model graph (neighbor vertex numbers per vertex).
    adjlist = M.pairwise.G.fadjlist
    for j = 1:length(indices)
        r = indices[j]
        for i = 1:length(vertices)
            v = vertices[i]
            # Weighted sum of (centered) neighbor values: Σ_k Λ[v,k](y_k − μ_k).
            ns = 0.0
            for ix in adjlist[v]
                ns = ns + M.pairwise[v,ix,r] * (Y[ix,r] - μ[ix,r])
            end
            # P(high | neighbors) = e^{hi⋅s} / (e^{lo⋅s} + e^{hi⋅s}), where
            # s = α_v + neighbor sum.  Guard against overflow of the numerator.
            loval = exp(lo*(M.unary[v,r] + ns))
            hival = exp(hi*(M.unary[v,r] + ns))
            if hival == Inf
                out[i,j] = 1.0
            else
                out[i,j] = hival / (loval + hival)
            end
        end
    end
    return out
end
"""
sample(M::AbstractAutologisticModel, k::Int = 1;
method::SamplingMethods = Gibbs,
indices = 1:size(M.unary,2),
average::Bool = false,
config = nothing,
burnin::Int = 0,
skip::Int = 0,
verbose::Bool = false
)
Draws `k` random samples from autologistic model `M`. For a model with `n` vertices in
its graph, the return value is:
- When `average=false`, an `n` × `length(indices)` × `k` array, with singleton dimensions
dropped. This array holds the random samples.
- When `average=true`, an `n` × `length(indices)` array, with singleton dimensions dropped.
This array holds the estimated marginal probabilities of observing the "high" level at
each vertex.
# Arguments
- `method`: a member of the enum [`SamplingMethods`](@ref), specifying which sampling
method will be used. The default is Gibbs sampling. Where feasible, it is recommended
to use one of the perfect sampling alternatives. See [`SamplingMethods`](@ref) for more.
- `indices`: gives the indices of the observation to use for sampling. If the model has
more than one observation, then `k` samples are drawn for each observation's parameter
settings. Use `indices` to restrict the samples to a subset of observations.
- `average`: controls the form of the output. When `average=true`, the return value is the
proportion of "high" samples at each vertex. (Note that this is **not** actually the
arithmetic average of the samples, unless the coding is (0,1). Rather, it is an estimate of
the probability of getting a "high" outcome.) When `average=false`, the full set of
samples is returned.
- `config`: allows a starting configuration of the random variables to be provided. Only
used if `method=Gibbs`. Any vector of the correct length, with two unique values, can be
used as `config`. By default a random configuration is used.
- `burnin`: specifies the number of initial samples to discard from the results. Only used
if `method=Gibbs`.
- `skip`: specifies how many samples to throw away between returned samples. Only used
if `method=Gibbs`.
- `verbose`: controls output to the console. If `true`, intermediate information about
sampling progress is printed to the console. Otherwise no output is shown.
# Examples
```jldoctest
julia> M = ALsimple(Graph(4,4));
julia> M.coding = (-2,3);
julia> r = sample(M,10);
julia> size(r)
(4, 10)
julia> sort(unique(r))
2-element Array{Float64,1}:
-2.0
3.0
```
"""
function sample(M::AbstractAutologisticModel, k::Int = 1; method::SamplingMethods = Gibbs,
indices=1:size(M.unary,2), average::Bool = false, config = nothing,
burnin::Int = 0, skip::Int = 0, verbose::Bool = false)
#NOTE: if keyword argument list changes in future, need to update the tuple
# samplenames inside splitkw() to match.
# Create storage object
n, m = size(M.unary)
nidx = length(indices)
if k < 1
error("k must be positive")
end
if minimum(indices) < 1 || maximum(indices) > m
error("indices must be between 1 and the number of observations")
end
if burnin < 0
error("burnin must be nonnegative")
end
if average
out = zeros(Float64, n, nidx)
else
out = zeros(Float64, n, nidx, k)
end
if method ∈ (perfect_read_once, perfect_reuse_samples, perfect_reuse_seeds) && any(M.pairwise .< 0)
@warn "The chosen pefect sampling method isn't theoretically justified for\n" *
"negative pairwise values. Samples might still be similar to Gibbs sampling\n" *
"draws. Consider the pefect_bounding_chain method."
end
# Call the sampling function for each index. Give details if verbose=true.
for i = 1:nidx
if verbose
println("== Sampling observation $(indices[i]) ==")
end
out[:,i,:] = sample_one_index(M, k, method=method,
index=indices[i], average=average,
config=config, burnin=burnin, skip=skip, verbose=verbose)
end
# Return
if average
if nidx==1
out = dropdims(out,dims=2)
end
else
if nidx==1 && k==1
out = dropdims(out,dims=(2,3))
elseif nidx==1
out = dropdims(out,dims=2)
elseif k == 1
out = dropdims(out,dims=3)
end
end
return out
end
# Draw samples for a single observation (column) of the model.  Extracts the
# observation's parameters and dispatches to the sampler chosen by `method`.
# Keyword meanings match sample(); `index` selects the observation.
function sample_one_index(M::AbstractAutologisticModel, k::Int = 1;
                          method::SamplingMethods = Gibbs, index::Int = 1, average::Bool = false,
                          config = nothing, burnin::Int = 0, skip::Int = 0, verbose::Bool = false)
    lo = Float64(M.coding[1])
    hi = Float64(M.coding[2])
    Λ = M.pairwise[:,:,index]
    α = M.unary[:,index]
    μ = centeringterms(M)[:,index]
    n = length(α)
    adjlist = M.pairwise.G.fadjlist
    if method == Gibbs
        # Idiom fix: compare to nothing with === (was ==).
        if config === nothing
            Y = rand([lo, hi], n)
        else
            # Coerce the user-supplied configuration to coded values.
            Y = vec(makecoded(makebool(config, M.coding), M.coding))
        end
        return gibbssample(lo, hi, Y, Λ, adjlist, α, μ, n, k, average, burnin, skip, verbose)
    elseif method == perfect_reuse_samples
        return cftp_reuse_samples(lo, hi, Λ, adjlist, α, μ, n, k, average, verbose)
    elseif method == perfect_reuse_seeds
        return cftp_reuse_seeds(lo, hi, Λ, adjlist, α, μ, n, k, average, verbose)
    elseif method == perfect_bounding_chain
        return cftp_bounding_chain(lo, hi, Λ, adjlist, α, μ, n, k, average, verbose)
    elseif method == perfect_read_once
        return cftp_read_once(lo, hi, Λ, adjlist, α, μ, n, k, average, verbose)
    end
end
"""
makecoded(M::AbstractAutologisticModel)
A convenience method for `makecoded(M.responses, M.coding)`. Use it to retrieve a model's
responses in coded form.
# Examples
```jldoctest
julia> M = ALRsimple(Graph(4,3), rand(4,2), Y=[true, false, false, true], coding=(-1,1));
julia> makecoded(M)
4×1 Array{Float64,2}:
1.0
-1.0
-1.0
1.0
```
"""
function makecoded(M::AbstractAutologisticModel)
return makecoded(M.responses, M.coding)
end
| Autologistic | https://github.com/kramsretlow/Autologistic.jl.git |
|
[
"MIT"
] | 0.5.1 | 15b96cd9e90eaa8d256266aed59fd13fd1c8f181 | code | 2584 | """
AbstractPairwiseParameter
Abstract type representing the pairwise part of an autologistic regression model.
All concrete subtypes should have the following fields:
* `G::SimpleGraph{Int}` -- The graph for the model.
* `count::Int` -- The number of observations.
In addition to `getindex()` and `setindex!()`, any concrete subtype
`P<:AbstractPairwiseParameter` should also have the following methods defined:
* `getparameters(P)`, returning a Vector{Float64}
* `setparameters!(P, newpar::Vector{Float64})` for setting parameter values.
Note that indexing is performance-critical and should be implemented carefully in
subtypes.
The intention is that each subtype should implement a different way of parameterizing
the association matrix. The way parameters are stored and values computed is up to the
subtypes.
This type inherits from `AbstractArray{Float64, 3}`. The third index is to allow for
multiple observations. `P[:,:,r]` should return the association matrix of the rth
observation in an appropriate subtype of AbstractMatrix. It is not intended that the third
index will be used for range or vector indexing like `P[:,:,1:5]` (though this may work
due to AbstractArray fallbacks).
# Examples
```jldoctest
julia> M = ALsimple(Graph(4,4));
julia> typeof(M.pairwise)
SimplePairwise
julia> isa(M.pairwise, AbstractPairwiseParameter)
true
```
"""
abstract type AbstractPairwiseParameter <: AbstractArray{Float64, 3} end

# Pairwise parameters use Cartesian (i, j, observation) indexing.
IndexStyle(::Type{<:AbstractPairwiseParameter}) = IndexCartesian()

#---- fallback methods --------------

# Size is (n, n, count): one n×n association matrix per observation,
# where n is the number of vertices in the graph.
size(p::AbstractPairwiseParameter) = (nv(p.G), nv(p.G), p.count)

# Fallback: concrete subtypes must implement vector-of-indices getindex themselves.
function getindex(p::AbstractPairwiseParameter, I::AbstractVector, J::AbstractVector)
    error("getindex not implemented for $(typeof(p))")
end
# Compact one-line display, e.g. "4×4×1 SimplePairwise".
function show(io::IO, p::AbstractPairwiseParameter)
    str = "$(size2string(p)) $(typeof(p))"
    print(io, str)
end

# Multi-line REPL display.
# NOTE(review): the original computed a single/multiple-observation phrase
# (mirroring the unary version) but never printed it; that dead code has been
# removed.  The printed output is unchanged.
function show(io::IO, ::MIME"text/plain", p::AbstractPairwiseParameter)
    print(io, "Autologistic pairwise parameter Λ of type $(typeof(p)), ",
          "$(size2string(p)) array\n",
          "Fields:\n",
          showfields(p,2),
          "Use indexing (e.g. mypairwise[:,:,:]) to see Λ values.")
end

# Fallback: concrete subtypes should override to list their fields for display.
function showfields(p::AbstractPairwiseParameter, leadspaces=0)
    return repeat(" ", leadspaces) *
           "(**Autologistic.showfields not implemented for $(typeof(p))**)\n"
end
| Autologistic | https://github.com/kramsretlow/Autologistic.jl.git |
|
[
"MIT"
] | 0.5.1 | 15b96cd9e90eaa8d256266aed59fd13fd1c8f181 | code | 1694 | """
AbstractUnaryParameter
Abstract type representing the unary part of an autologistic regression model.
This type inherits from AbstractArray{Float64, 2}. The first dimension is for
vertices/variables in the graph, and the second dimension is for observations. It is
two-dimensional even if there is only one observation.
Implementation details are left to concrete subtypes, and will depend on how the unary
terms are parametrized. Note that indexing is performance-critical.
Concrete subtypes should implement `getparameters`, `setparameters!`, and `showfields`.
# Examples
```jldoctest
julia> M = ALsimple(Graph(4,4));
julia> typeof(M.unary)
FullUnary
julia> isa(M.unary, AbstractUnaryParameter)
true
```
"""
abstract type AbstractUnaryParameter <: AbstractArray{Float64, 2} end

# Unary parameters use Cartesian (vertex, observation) indexing.
IndexStyle(::Type{<:AbstractUnaryParameter}) = IndexCartesian()

# Compact one-line display, e.g. "4×1 FullUnary".
function show(io::IO, u::AbstractUnaryParameter)
    r, c = size(u)
    str = "$(r)×$(c) $(typeof(u))"
    print(io, str)
end

# Multi-line REPL display.  Single-observation parameters also report the
# average α value.
function show(io::IO, ::MIME"text/plain", u::AbstractUnaryParameter)
    r, c = size(u)
    if c==1
        str = "\n$(size2string(u)) array with average value $(round(mean(u), digits=3)).\n"
    else
        str = " $(size2string(u)) array.\n"
    end
    print(io, "Autologistic unary parameter α of type $(typeof(u)),",
          str,
          "Fields:\n",
          showfields(u,2),
          "Use indexing (e.g. myunary[:,:]) to see α values.")
end

# Fallback: concrete subtypes should override to list their fields for display.
function showfields(u::AbstractUnaryParameter, leadspaces=0)
    return repeat(" ", leadspaces) *
           "(**Autologistic.showfields not implemented for $(typeof(u))**)\n"
end
| Autologistic | https://github.com/kramsretlow/Autologistic.jl.git |
|
[
"MIT"
] | 0.5.1 | 15b96cd9e90eaa8d256266aed59fd13fd1c8f181 | code | 11622 | # Type Aliases
""" Type alias for `Union{Array{T,1}, Array{T,2}} where T` """
const VecOrMat = Union{Array{T,1}, Array{T,2}} where T
""" Type alias for `Union{Array{Float64,1},Array{Float64,2}}` """
const Float1D2D = Union{Array{Float64,1},Array{Float64,2}}
""" Type alias for `Union{Array{Float64,2},Array{Float64,3}}` """
const Float2D3D = Union{Array{Float64,2},Array{Float64,3}}
""" Type alias for `Union{Array{NTuple{2,T},1},Array{NTuple{3,T},1}} where T<:Real` """
const SpatialCoordinates = Union{Array{NTuple{2,T},1},Array{NTuple{3,T},1}} where T<:Real
# somewhat arbitrary constants in sampling algorithms
const maxepoch = 40 #used in cftp_reuse_seeds
const ntestchains = 15 #used in blocksize_estimate
# Enumerations
"""
    CenteringKinds

An enumeration to facilitate choosing a form of centering for the model.  Available
choices are:

- `none`: no centering (centering adjustment equals zero).
- `expectation`: the centering adjustment is the expected value of the response under the
  assumption of independence (this is what has been used in the "centered autologistic
  model").
- `onehalf`: a constant value of centering adjustment equal to 0.5 (this produces the
  "symmetric autologistic model" when used with 0,1 coding).

The default/recommended model has centering of `none` with (-1, 1) coding.

# Examples
```jldoctest
julia> CenteringKinds
Enum CenteringKinds:
none = 0
expectation = 1
onehalf = 2
```
"""
@enum CenteringKinds none expectation onehalf

"""
    SamplingMethods

An enumeration to facilitate choosing a method for random sampling from autologistic models.
Available choices are:

- `Gibbs`:  Gibbs sampling.
- `perfect_bounding_chain`: Perfect sampling, using a bounding chain algorithm.
- `perfect_reuse_samples`: Perfect sampling. CFTP implemented by reusing random numbers.
- `perfect_reuse_seeds`: Perfect sampling. CFTP implemented by reusing RNG seeds.
- `perfect_read_once`: Perfect sampling. Read-once CFTP implementation.

All of the perfect sampling methods are implementations of coupling from the past (CFTP).
`perfect_bounding_chain` uses a bounding chain approach that holds even when Λ contains
negative elements; the other three options rely on a monotonicity argument that requires
Λ to have only positive elements (though they should work similar to Gibbs sampling in
that case).

Different perfect sampling implementations might work best for different models, and
parameter settings exist where perfect sampling coalescence might take a prohibitively long
time.  For these reasons, Gibbs sampling is the default in `sample`.

# Examples
```jldoctest
julia> SamplingMethods
Enum SamplingMethods:
Gibbs = 0
perfect_reuse_samples = 1
perfect_reuse_seeds = 2
perfect_read_once = 3
perfect_bounding_chain = 4
```
"""
@enum SamplingMethods Gibbs perfect_reuse_samples perfect_reuse_seeds perfect_read_once perfect_bounding_chain
"""
makebool(v::VecOrMat, vals=nothing)
Makes a 2D array of Booleans out of a 1- or 2-D input. The 2nd argument `vals` optionally
can be a 2-tuple (low, high) specifying the two possible values in `v` (useful for the case
where all elements of `v` take one value or the other).
- If `v` has more than 2 unique values, throws an error.
- If `v` has exactly 2 unique values, use those to set the coding (ignore `vals`).
- If `v` has 1 unique value, use `vals` to determine if it's the high or low value (throw
an error if the single value isn't in `vals`).
# Examples
```jldoctest
julia> makebool([1.0 2.0; 1.0 2.0])
2×2 Array{Bool,2}:
false true
false true
julia> makebool(["yes", "no", "no"])
3×1 Array{Bool,2}:
true
false
false
julia> [makebool([1, 1, 1], (-1,1)) makebool([1, 1, 1], (1, 2))]
3×2 Array{Bool,2}:
true false
true false
true false
```
"""
function makebool(v::VecOrMat, vals=nothing)
if ndims(v)==1
v = v[:,:] #**convet to 2D, not sure the logic behind [:,:] index
end
if typeof(v) == Array{Bool,2}
return v
end
(nrow, ncol) = size(v)
out = Array{Bool}(undef, nrow, ncol)
nv = length(unique(v))
if nv > 2
error("The input has more than two values.")
elseif nv == 2
lower = minimum(v)
elseif typeof(vals) <: NTuple{2} && v[1] in vals
lower = vals[1]
else
error("One unique value. Could not assign true or false.")
end
for i in 1:nrow
for j in 1:ncol
v[i,j]==lower ? out[i,j] = false : out[i,j] = true
end
end
return out
end
"""
makecoded(b::VecOrMat, coding::Tuple{Real,Real})
Convert Boolean responses into coded values. The first argument is boolean.
Returns a 2D array of Float64.
# Examples
```jldoctest
julia> makecoded([true, false, false, true], (-1, 1))
4×1 Array{Float64,2}:
1.0
-1.0
-1.0
1.0
```
"""
function makecoded(b::VecOrMat, coding::Tuple{Real,Real})
lo = Float64(coding[1])
hi = Float64(coding[2])
if ndims(b)==1
b = b[:,:]
end
n, m = size(b)
out = Array{Float64,2}(undef, n, m)
for j = 1:m
for i = 1:n
out[i,j] = b[i,j] ? hi : lo
end
end
return out
end
"""
makegrid4(r::Int, c::Int, xlim::Tuple{Real,Real}=(0.0,1.0),
ylim::Tuple{Real,Real}=(0.0,1.0))
Returns a named tuple `(:G, :locs)`, where `:G` is a graph, and `:locs` is an array of
numeric tuples. Vertices of `:G` are laid out in a rectangular, 4-connected grid with
`r` rows and `c` columns. The tuples in `:locs` contain the spatial coordinates of each
vertex. Optional arguments `xlim` and `ylim` determine the bounds of the rectangular
layout.
# Examples
```jldoctest
julia> out4 = makegrid4(11, 21, (-1,1), (-10,10));
julia> nv(out4.G) == 11*21 #231
true
julia> ne(out4.G) == 11*20 + 21*10 #430
true
julia> out4.locs[11*10 + 6] == (0.0, 0.0) #location of center vertex.
true
```
"""
function makegrid4(r::Int, c::Int, xlim::Tuple{Real,Real}=(0.0,1.0),
ylim::Tuple{Real,Real}=(0.0,1.0))
# Create graph with r*c vertices, no edges
G = Graph(r*c)
# loop through vertices. Number vertices columnwise.
for i in 1:r*c
if mod(i,r) !== 1 # N neighbor
add_edge!(G,i,i-1)
end
if i <= (c-1)*r # E neighbor
add_edge!(G,i,i+r)
end
if mod(i,r) !== 0 # S neighbor
add_edge!(G,i,i+1)
end
if i > r # W neighbor
add_edge!(G,i,i-r)
end
end
rngx = range(xlim[1], stop=xlim[2], length=c)
rngy = range(ylim[1], stop=ylim[2], length=r)
locs = [(rngx[i], rngy[j]) for i in 1:c for j in 1:r]
return (G=G, locs=locs)
end
"""
makegrid8(r::Int, c::Int, xlim::Tuple{Real,Real}=(0.0,1.0),
ylim::Tuple{Real,Real}=(0.0,1.0))
Returns a named tuple `(:G, :locs)`, where `:G` is a graph, and `:locs` is an array of
numeric tuples. Vertices of `:G` are laid out in a rectangular, 8-connected grid with
`r` rows and `c` columns. The tuples in `:locs` contain the spatial coordinates of each
vertex. Optional arguments `xlim` and `ylim` determine the bounds of the rectangular
layout.
# Examples
```jldoctest
julia> out8 = makegrid8(11, 21, (-1,1), (-10,10));
julia> nv(out8.G) == 11*21 #231
true
julia> ne(out8.G) == 11*20 + 21*10 + 2*20*10 #830
true
julia> out8.locs[11*10 + 6] == (0.0, 0.0) #location of center vertex.
true
```
"""
function makegrid8(r::Int, c::Int, xlim::Tuple{Real,Real}=(0.0,1.0),
ylim::Tuple{Real,Real}=(0.0,1.0))
# Create the 4-connected graph
G, locs = makegrid4(r, c, xlim, ylim)
# loop through vertices and add the diagonal edges.
for i in 1:r*c
if (mod(i,r) !== 1) && (i<=(c-1)*r) # NE neighbor
add_edge!(G,i,i+r-1)
end
if (mod(i,r) !== 0) && (i <= (c-1)*r) # SE neighbor
add_edge!(G,i,i+r+1)
end
if (mod(i,r) !== 0) && (i > r) # SW neighbor
add_edge!(G,i,i-r+1)
end
if (mod(i,r) !== 1) && (i > r) # NW neighbor
add_edge!(G,i,i-r-1)
end
end
return (G=G, locs=locs)
end
"""
makespatialgraph(coords::C, δ::Real) where C<:SpatialCoordinates
Returns a named tuple `(:G, :locs)`, where `:G` is a graph, and `:locs` is an array of
numeric tuples. Each element of `coords` is a 2- or 3-tuple of spatial coordinates, and
this argument is returned unchanged as `:locs`. The graph `:G` has `length(coords)`
vertices, with edges connecting every pair of vertices within Euclidean distance `δ` of
each other.
# Examples
```jldoctest
julia> c = [(Float64(i), Float64(j)) for i = 1:5 for j = 1:5];
julia> out = makespatialgraph(c, sqrt(2));
julia> out.G
{25, 72} undirected simple Int64 graph
julia> length(out.locs)
25
```
"""
function makespatialgraph(coords::C, δ::Real) where C<:SpatialCoordinates
#Replace coords by an equivalent tuple of Float64, for consistency
n = length(coords)
locs = [Float64.(coords[i]) for i = 1:n]
#Make the graph and add edges
G = Graph(n)
for i in 1:n
for j in i+1:n
if norm(locs[i] .- locs[j]) <= δ
add_edge!(G,i,j)
end
end
end
return (G=G, locs=locs)
end
"""
`Autologistic.datasets(name::String)`
Open data sets for demonstrations of Autologistic regression. Returns the data set as a `DataFrame`.
"""
function datasets(name::String)
if name=="pigmentosa"
dfpath = joinpath(dirname(pathof(Autologistic)), "..", "assets", "pigmentosa.csv")
return read(dfpath, DataFrame)
elseif name=="hydrocotyle"
dfpath = joinpath(dirname(pathof(Autologistic)), "..", "assets", "hydrocotyle.csv")
return read(dfpath, DataFrame)
else
error("Name is not one of the available options.")
end
end
# Render an array's size as a display string: "n-element" for vectors,
# "10×5×2"-style for higher dimensions (used by the show methods).
function size2string(x::T) where T<:AbstractArray
    dims = size(x)
    ndim = length(dims)
    ndim == 1 && return "$(dims[1])-element"
    pieces = ["$(dims[1])"]
    for k = 2:ndim
        push!(pieces, "$(dims[k])")
    end
    return join(pieces, "×")
end
# Approximate the Hessian of `fcn` at the point `x` using step width `h`,
# via the O(h^2) central difference formula.  Intended for obtaining standard
# errors after maximum likelihood fitting.  Returns a symmetric matrix.
function hess(fcn, x, h=1e-6)
    npar = length(x)
    H = zeros(npar, npar)
    # Fill the upper triangle (including the diagonal); entry (a,b) uses the
    # four-point central difference in directions a and b.
    for a = 1:npar
        step_a = zeros(npar)
        step_a[a] = h
        for b = a:npar
            step_b = zeros(npar)
            step_b[b] = h
            H[a, b] = (fcn(x + step_a + step_b) - fcn(x + step_a - step_b) -
                       fcn(x - step_a + step_b) + fcn(x - step_a - step_b)) / (4 * h^2)
        end
    end
    # Mirror the strict upper triangle into the lower half and return.
    return H + triu(H, 1)'
end
# Takes a named tuple (arising from a keyword argument list) and produces two named
# tuples: one with the arguments for optimize(), and one with arguments for sample().
# Usage: optimargs, sampleargs = splitkw(keyword_tuple)
# (Declared as a proper function rather than a non-const global closure, so the
# binding is constant and type-stable.)
function splitkw(kwargs)
    optimnames = fieldnames(typeof(Options()))
    # Bug fix: :skip was missing from this tuple even though sample() accepts a
    # `skip` keyword, so a skip= argument was silently dropped.  Keep this tuple
    # in sync with the keyword arguments of sample().
    samplenames = (:method, :indices, :average, :config, :burnin, :skip, :verbose)
    optimargs = Dict{Symbol,Any}()
    sampleargs = Dict{Symbol,Any}()
    for (symb, val) in pairs(kwargs)
        if symb in optimnames
            push!(optimargs, symb => val)
        end
        if symb in samplenames
            push!(sampleargs, symb => val)
        end
    end
    return (;optimargs...), (;sampleargs...)
end
| Autologistic | https://github.com/kramsretlow/Autologistic.jl.git |
|
[
"MIT"
] | 0.5.1 | 15b96cd9e90eaa8d256266aed59fd13fd1c8f181 | code | 6123 | """
FullPairwise
A type representing an association matrix with a "saturated" parametrization--one parameter
for each edge in the graph.
In this type, the association matrix for each observation is a symmetric matrix with the
same pattern of nonzeros as the graph's adjacency matrix, but with arbitrary values in those
locations. The package convention is to provide parameters as a vector of `Float64`. So
`getparameters` and `setparameters!` use a vector of `ne(G)` values that correspond to the
nonzero locations in the upper triangle of the adjacency matrix, in the same (lexicographic)
order as `edges(G)`.
The association matrix is stored as a `SparseMatrixCSC{Float64,Int64}` in the field Λ.
This type does not allow for different observations to have different association matricse.
So while `size` returns a 3-dimensional result, the third index is ignored
when accessing the array's elements.
# Constructors
FullPairwise(G::SimpleGraph, count::Int=1)
FullPairwise(n::Int, count::Int=1)
FullPairwise(λ::Real, G::SimpleGraph)
FullPairwise(λ::Real, G::SimpleGraph, count::Int)
FullPairwise(λ::Vector{Float64}, G::SimpleGraph)
If provide only a graph, set λ = zeros(nv(G)).
If provide only an integer, set λ to zeros and make a totally disconnected graph.
If provide a graph and a scalar, convert the scalar to a vector of the right length.
# Examples
```jldoctest
julia> g = makegrid4(2,2).G;
julia> λ = [1.0, 2.0, -1.0, -2.0];
julia> p = FullPairwise(λ, g);
julia> typeof(p.Λ)
SparseArrays.SparseMatrixCSC{Float64,Int64}
julia> Matrix(p[:,:])
4×4 Array{Float64,2}:
0.0 1.0 2.0 0.0
1.0 0.0 0.0 -1.0
2.0 0.0 0.0 -2.0
0.0 -1.0 -2.0 0.0
```
"""
mutable struct FullPairwise <: AbstractPairwiseParameter
    λ::Vector{Float64}                  # one parameter per edge, in edges(G) order
    G::SimpleGraph{Int}                 # the model graph
    count::Int                          # number of observations
    Λ::SparseMatrixCSC{Float64,Int64}   # symmetric association matrix built from λ
    # Inner constructor: validates inputs and builds the sparse Λ matrix.
    function FullPairwise(lam, g, m)
        if length(lam) !== ne(g)
            error("FullPairwise: length(λ) must equal the number of edges in the graph.")
        end
        if m < 1
            error("FullPairwise: count must be positive")
        end
        # Collect the endpoints of every edge so each parameter can be placed
        # in both (src,dst) and (dst,src) positions (symmetry).
        vv1 = Vector{Int64}(undef, ne(g))
        vv2 = Vector{Int64}(undef, ne(g))
        i = 1
        for e in edges(g)
            vv1[i] = e.src
            vv2[i] = e.dst
            i += 1
        end
        Λ = sparse([vv1; vv2], [vv2; vv1], [lam; lam], nv(g), nv(g))
        new(lam, g, m, Λ)
    end
end
# Constructors
# - If provide only a graph, set λ = zeros(ne(G)) (one parameter per edge).
# - If provide only an integer, set λ to zeros and make a totally disconnected graph.
# - If provide a graph and a scalar, convert the scalar to a vector of the right length.
FullPairwise(G::SimpleGraph, count::Int=1) = FullPairwise(zeros(ne(G)), G, count)
FullPairwise(n::Int, count::Int=1) = FullPairwise(0, SimpleGraph(n), count)
FullPairwise(λ::Real, G::SimpleGraph) = FullPairwise((Float64)(λ)*ones(ne(G)), G, 1)
FullPairwise(λ::Real, G::SimpleGraph, count::Int) = FullPairwise((Float64)(λ)*ones(ne(G)), G, count)
FullPairwise(λ::Vector{Float64}, G::SimpleGraph) = FullPairwise(λ, G, 1)
#---- AbstractArray methods ---- (following sparsematrix.jl)
# getindex - implementations
# The parameter values are stored directly in Λ, so indexing forwards to it.
getindex(p::FullPairwise, i::Int, j::Int) = p.Λ[i, j]
getindex(p::FullPairwise, i::Int) = p.Λ[i]
getindex(p::FullPairwise, ::Colon, ::Colon) = p.Λ
getindex(p::FullPairwise, I::AbstractArray) = p.Λ[I]
getindex(p::FullPairwise, I::AbstractVector, J::AbstractVector) = p.Λ[I,J]
# getindex - translations
# All observations share the same Λ, so any third ("replicate") index r is
# ignored; Bool masks are converted to index vectors with findall.
getindex(p::FullPairwise, I::Tuple{Integer, Integer}) = p[I[1], I[2]]
getindex(p::FullPairwise, I::Tuple{Integer, Integer, Integer}) = p[I[1], I[2]]
getindex(p::FullPairwise, i::Int, j::Int, r::Int) = p[i,j]
getindex(p::FullPairwise, ::Colon, ::Colon, ::Colon) = p[:,:]
getindex(p::FullPairwise, ::Colon, ::Colon, r::Int) = p[:,:]
getindex(p::FullPairwise, ::Colon, j) = p[1:size(p.Λ,1), j]
getindex(p::FullPairwise, i, ::Colon) = p[i, 1:size(p.Λ,2)]
getindex(p::FullPairwise, ::Colon, j, r) = p[:,j]
getindex(p::FullPairwise, i, ::Colon, r) = p[i,:]
getindex(p::FullPairwise, I::AbstractVector{Bool}, J::AbstractRange{<:Integer}) = p[findall(I),J]
getindex(p::FullPairwise, I::AbstractRange{<:Integer}, J::AbstractVector{Bool}) = p[I,findall(J)]
getindex(p::FullPairwise, I::Integer, J::AbstractVector{Bool}) = p[I,findall(J)]
getindex(p::FullPairwise, I::AbstractVector{Bool}, J::Integer) = p[findall(I),J]
getindex(p::FullPairwise, I::AbstractVector{Bool}, J::AbstractVector{Bool}) = p[findall(I),findall(J)]
getindex(p::FullPairwise, I::AbstractVector{<:Integer}, J::AbstractVector{Bool}) = p[I,findall(J)]
getindex(p::FullPairwise, I::AbstractVector{Bool}, J::AbstractVector{<:Integer}) = p[findall(I),J]
# setindex! - element-wise mutation is disallowed so that λ and Λ cannot get
# out of sync; all forms raise and direct users to setparameters! instead.
setindex!(p::FullPairwise, i::Int, j::Int) =
    error("Pairwise values cannot be set directly. Use setparameters! instead.")
setindex!(p::FullPairwise, i::Int, j::Int, k::Int) =
    error("Pairwise values cannot be set directly. Use setparameters! instead.")
setindex!(p::FullPairwise, i::Int) =
    error("Pairwise values cannot be set directly. Use setparameters! instead.")
#---- AbstractPairwiseParameter interface methods ----
# getparameters() refreshes p.λ from Λ before returning, so the parameter
# vector stays valid even if Λ was replaced manually without updating λ.
# setparameters!() keeps Λ synchronized whenever the parameters change.
function getparameters(p::FullPairwise)
    for (k, e) in enumerate(edges(p.G))
        p.λ[k] = p.Λ[e.src, e.dst]
    end
    return p.λ
end
# Overwrite λ with newpar (in edge order) and mirror each value into both
# triangles of the sparse association matrix Λ.
function setparameters!(p::FullPairwise, newpar::Vector{Float64})
    for (k, e) in enumerate(edges(p.G))
        v = newpar[k]
        p.λ[k] = v
        p.Λ[e.src, e.dst] = v
        p.Λ[e.dst, e.src] = v
    end
end
#---- to be used in show methods ----
# Return an indented, one-line-per-field summary of the object.
function showfields(p::FullPairwise, leadspaces=0)
    spc = repeat(" ", leadspaces)
    return spc * "λ edge-ordered vector of association parameter values\n" *
           spc * "G the graph ($(nv(p.G)) vertices, $(ne(p.G)) edges)\n" *
           spc * "count $(p.count) (the number of observations)\n" *
           spc * "Λ $(size2string(p.Λ)) SparseMatrixCSC (the association matrix)\n"
end
| Autologistic | https://github.com/kramsretlow/Autologistic.jl.git |
|
[
"MIT"
] | 0.5.1 | 15b96cd9e90eaa8d256266aed59fd13fd1c8f181 | code | 2764 | """
FullUnary
The unary part of an autologistic model, with one parameter per vertex per observation. The
type has only a single field, for holding an array of parameters.
# Constructors
FullUnary(alpha::Array{Float64,1})
FullUnary(n::Int) #-initializes parameters to zeros
FullUnary(n::Int, m::Int) #-initializes parameters to zeros
# Examples
```jldoctest
julia> u = FullUnary(5, 3);
julia> u[:,:]
5×3 Array{Float64,2}:
0.0 0.0 0.0
0.0 0.0 0.0
0.0 0.0 0.0
0.0 0.0 0.0
0.0 0.0 0.0
```
"""
mutable struct FullUnary <: AbstractUnaryParameter
    α::Array{Float64,2}  # n-by-m array: one parameter per vertex per observation
end
# Constructors.
# A vector argument is treated as a single observation (an n-by-1 array);
# integer arguments produce zero-initialized parameters.
FullUnary(alpha::Array{Float64,1}) = FullUnary(reshape(alpha, :, 1))
FullUnary(n::Int) = FullUnary(zeros(Float64, n, 1))
FullUnary(n::Int, m::Int) = FullUnary(zeros(Float64, n, m))
#---- AbstractArray methods ----
size(u::FullUnary) = size(u.α)
length(u::FullUnary) = length(u.α)
# getindex - implementations (forward directly to the parameter array α)
getindex(u::FullUnary, I::AbstractArray) = u.α[I]
getindex(u::FullUnary, i::Int, j::Int) = u.α[i,j]
getindex(u::FullUnary, ::Colon, ::Colon) = u.α
getindex(u::FullUnary, I::AbstractVector, J::AbstractVector) = u.α[I,J]
# getindex - translations (Bool masks converted to index vectors via findall)
getindex(u::FullUnary, I::Tuple{Integer, Integer}) = u[I[1], I[2]]
getindex(u::FullUnary, ::Colon, j::Int) = u[1:size(u.α,1), j]
getindex(u::FullUnary, i::Int, ::Colon) = u[i, 1:size(u.α,2)]
getindex(u::FullUnary, I::AbstractRange{<:Integer}, J::AbstractVector{Bool}) = u[I,findall(J)]
getindex(u::FullUnary, I::AbstractVector{Bool}, J::AbstractRange{<:Integer}) = u[findall(I),J]
getindex(u::FullUnary, I::Integer, J::AbstractVector{Bool}) = u[I,findall(J)]
getindex(u::FullUnary, I::AbstractVector{Bool}, J::Integer) = u[findall(I),J]
getindex(u::FullUnary, I::AbstractVector{Bool}, J::AbstractVector{Bool}) = u[findall(I),findall(J)]
getindex(u::FullUnary, I::AbstractVector{<:Integer}, J::AbstractVector{Bool}) = u[I,findall(J)]
getindex(u::FullUnary, I::AbstractVector{Bool}, J::AbstractVector{<:Integer}) = u[findall(I),J]
# setindex! - unary values may be set element-wise (unlike pairwise types)
setindex!(u::FullUnary, v::Real, I::Vararg{Int,2}) = (u.α[CartesianIndex(I)] = v)
#---- AbstractUnaryParameter interface ----
"""
    getparameters(u::FullUnary)

Return the unary parameters as a vector (column-major vectorization of `u.α`).

The returned vector shares memory with `u.α`, preserving the write-through
behavior of the previous `dropdims(reshape(u, length(u), 1), dims=2)` form,
but it is a plain `Vector{Float64}` rather than a lazy view built from two
wrappers (simpler and type-stable). Note the returned vector stays bound to
the current `u.α` array even if the field is later replaced.
"""
getparameters(u::FullUnary) = vec(u.α)

"""
    setparameters!(u::FullUnary, newpars::Vector{Float64})

Replace all unary parameters at once; `newpars` must have length `n*m`.
Subsetting is not supported - only whole-vector replacement.
"""
function setparameters!(u::FullUnary, newpars::Vector{Float64})
    # Note, not implementing subsetting etc., can only replace the whole vector.
    if length(newpars) != length(u)
        error("incorrect parameter vector length")
    end
    u.α = reshape(newpars, size(u))
end
#---- to be used in show methods ----
# Return an indented one-line summary of the α field.
function showfields(u::FullUnary, leadspaces=0)
    spc = repeat(" ", leadspaces)
    return spc * "α $(size2string(u.α)) array (unary parameter values)\n"
end | Autologistic | https://github.com/kramsretlow/Autologistic.jl.git |
|
[
"MIT"
] | 0.5.1 | 15b96cd9e90eaa8d256266aed59fd13fd1c8f181 | code | 4276 | """
LinPredUnary
The unary part of an autologistic model, parametrized as a regression linear predictor.
Its fields are `X`, an n-by-p-by-m matrix (n obs, p predictors, m observations), and `β`,
a p-vector of parameters (the same for all observations).
# Constructors
LinPredUnary(X::Matrix{Float64}, β::Vector{Float64})
LinPredUnary(X::Matrix{Float64})
LinPredUnary(X::Array{Float64, 3})
LinPredUnary(n::Int,p::Int)
LinPredUnary(n::Int,p::Int,m::Int)
Any quantities not provided in the constructors are initialized to zeros.
# Examples
```jldoctest
julia> u = LinPredUnary(ones(5,3,2), [1.0, 2.0, 3.0]);
julia> u[:,:]
5×2 Array{Float64,2}:
6.0 6.0
6.0 6.0
6.0 6.0
6.0 6.0
6.0 6.0
```
"""
struct LinPredUnary <: AbstractUnaryParameter
    X::Array{Float64, 3}
    β::Vector{Float64}
    # Inner constructor: the predictor count (2nd dimension of X) must match
    # the length of the coefficient vector β.
    function LinPredUnary(x, beta)
        size(x, 2) == length(beta) || error("LinPredUnary: X and β dimensions are inconsistent")
        return new(x, beta)
    end
end
# Constructors. Quantities not provided are initialized to zeros.
# A 2-D X is treated as a single observation (reshaped to n-by-p-by-1).
function LinPredUnary(X::Matrix{Float64}, β::Vector{Float64})
    (n, p) = size(X)
    return LinPredUnary(reshape(X, (n, p, 1)), β)
end
function LinPredUnary(X::Matrix{Float64})
    (n, p) = size(X)
    return LinPredUnary(reshape(X, (n, p, 1)), zeros(Float64, p))
end
function LinPredUnary(X::Array{Float64, 3})
    # Only the predictor count is needed here (the original destructured
    # size(X) into three locals and left n and m unused).
    return LinPredUnary(X, zeros(Float64, size(X, 2)))
end
function LinPredUnary(n::Int, p::Int)
    X = zeros(Float64, n, p, 1)
    return LinPredUnary(X, zeros(Float64, p))
end
function LinPredUnary(n::Int, p::Int, m::Int)
    X = zeros(Float64, n, p, m)
    return LinPredUnary(X, zeros(Float64, p))
end
#---- AbstractArray methods ----
# size is (number of vertices, number of observations); the predictor
# dimension of X is internal.
size(u::LinPredUnary) = (size(u.X,1), size(u.X,3))
# getindex - implementations
# Values are computed on demand as slices of X*β; explicit loops are used
# (rather than matrix multiplication) to control allocations.
function getindex(u::LinPredUnary, ::Colon, ::Colon)
    n, p, m = size(u.X)
    out = zeros(n,m)
    for r = 1:m
        for i = 1:n
            for j = 1:p
                out[i,r] = out[i,r] + u.X[i,j,r] * u.β[j]
            end
        end
    end
    return out
end
function getindex(u::LinPredUnary, I::AbstractArray)
    # Compute the full matrix, then subset (simple, though not the cheapest).
    out = u[:,:]
    return out[I]
end
getindex(u::LinPredUnary, i::Int, r::Int) = sum(u.X[i,:,r] .* u.β)
function getindex(u::LinPredUnary, ::Colon, r::Int)
    n, p, m = size(u.X)
    out = zeros(n)
    for i = 1:n
        for j = 1:p
            out[i] = out[i] + u.X[i,j,r] * u.β[j]
        end
    end
    return out
end
function getindex(u::LinPredUnary, I::AbstractVector, R::AbstractVector)
    out = zeros(length(I),length(R))
    for r in 1:length(R)
        for i in 1:length(I)
            for j = 1:size(u.X,2)
                out[i,r] = out[i,r] + u.X[I[i],j,R[r]] * u.β[j]
            end
        end
    end
    return out
end
# getindex - translations (Bool masks converted to index vectors via findall)
getindex(u::LinPredUnary, I::Tuple{Integer, Integer}) = u[I[1], I[2]]
getindex(u::LinPredUnary, i::Int, ::Colon) = u[i, 1:size(u.X,3)]
getindex(u::LinPredUnary, I::AbstractRange{<:Integer}, J::AbstractVector{Bool}) = u[I,findall(J)]
getindex(u::LinPredUnary, I::AbstractVector{Bool}, J::AbstractRange{<:Integer}) = u[findall(I),J]
getindex(u::LinPredUnary, I::Integer, J::AbstractVector{Bool}) = u[I,findall(J)]
getindex(u::LinPredUnary, I::AbstractVector{Bool}, J::Integer) = u[findall(I),J]
getindex(u::LinPredUnary, I::AbstractVector{Bool}, J::AbstractVector{Bool}) = u[findall(I),findall(J)]
getindex(u::LinPredUnary, I::AbstractVector{<:Integer}, J::AbstractVector{Bool}) = u[I,findall(J)]
getindex(u::LinPredUnary, I::AbstractVector{Bool}, J::AbstractVector{<:Integer}) = u[findall(I),J]
# setindex! - values are derived from X and β, so direct assignment is an error
setindex!(u::LinPredUnary, v::Real, i::Int, j::Int) =
    error("Values of $(typeof(u)) must be set using setparameters!().")
#---- AbstractUnaryParameter interface ----
# The parameters are just the regression coefficients β.
getparameters(u::LinPredUnary) = u.β
function setparameters!(u::LinPredUnary, newpars::Vector{Float64})
    # In-place assignment; throws DimensionMismatch on wrong length.
    u.β[:] = newpars
end
#---- to be used in show methods ----
# Return an indented, one-line-per-field summary of the object.
function showfields(u::LinPredUnary, leadspaces=0)
    spc = repeat(" ", leadspaces)
    return spc * "X $(size2string(u.X)) array (covariates)\n" *
           spc * "β $(size2string(u.β)) vector (regression coefficients)\n"
end | Autologistic | https://github.com/kramsretlow/Autologistic.jl.git |
|
[
"MIT"
] | 0.5.1 | 15b96cd9e90eaa8d256266aed59fd13fd1c8f181 | code | 14088 | # Neighbor sum of a certain variable. Note, writing the neighbor sum as a loop rather
# than an inner product saved lots of memory allocations.
# Weighted neighbor sum for one variable: sum over the neighbor indices `nbr`
# of Λ[row, j] * (Y[j] - μ[j]). Kept as an explicit loop (not an inner
# product) to avoid memory allocations in the samplers' hot path.
function nbrsum(Λ, Y, μ, row, nbr)::Float64
    total = 0.0
    for j in nbr
        total += Λ[row, j] * (Y[j] - μ[j])
    end
    return total
end
# Conditional probability that variable ix takes the high state, given its
# neighbor sum ns, the unary parameters α, and the coding (lo, hi).
function condprob(α, ns, lo, hi, ix)::Float64
    return 1 / (1 + exp((lo - hi) * (α[ix] + ns)))
end
# Perform one full Gibbs sweep over all n variables, updating Y in-place.
# Each variable is set to hi with its full conditional probability given the
# current state of its neighbors, and to lo otherwise.
function gibbsstep!(Y, lo, hi, Λ, adjlist, α, μ, n)
    for i = 1:n
        ns = nbrsum(Λ, Y, μ, i, adjlist[i])
        p_i = condprob(α, ns, lo, hi, i)
        Y[i] = rand() < p_i ? hi : lo
    end
end
# Run Gibbs sampling.
# Y is the chain state and is updated in-place. After `burnin` sweeps, `k`
# draws are collected, with `skip` additional sweeps between successive
# draws. If `average` is true, returns the per-vertex proportion of draws
# equal to `hi` (an n-vector); otherwise returns an n-by-k matrix of draws.
function gibbssample(lo::Float64, hi::Float64, Y::Vector{Float64},
    Λ::SparseMatrixCSC{Float64,Int}, adjlist::Array{Array{Int64,1},1},
    α::Vector{Float64}, μ::Vector{Float64}, n::Int, k::Int, average::Bool,
    burnin::Int, skip::Int, verbose::Bool)
    temp = average ? zeros(Float64, n) : zeros(Float64, n, k)
    if verbose print("\nStarting burnin...") end
    for j = 1:burnin
        gibbsstep!(Y, lo, hi, Λ, adjlist, α, μ, n)
    end
    if verbose print(" complete\n") end
    for j = 1:k
        gibbsstep!(Y, lo, hi, Λ, adjlist, α, μ, n)
        if average
            # Performance tip: looping here saves memory allocations. (perhaps until we get
            # an operator like .+=)
            for i in 1:n
                temp[i] = temp[i] + Y[i]
            end
        else
            for i in 1:n
                temp[i,j] = Y[i]
            end
        end
        if j != k
            # Thinning: discard `skip` sweeps between retained draws.
            for s = 1:skip
                gibbsstep!(Y, lo, hi, Λ, adjlist, α, μ, n)
            end
        end
        if verbose
            println("finished draw $(j) of $(k)")
        end
    end
    if average
        # Convert the running sum into the fraction of draws at hi:
        # (sum - k*lo) / (k*(hi-lo)).
        return map(x -> (x - k*lo)/(k*(hi-lo)), temp)
    else
        return temp
    end
end
# Run CFTP epochs from the jth one forward to time zero. Each epoch has its
# own stored RNG seed and time range so the identical random sequence is
# replayed on every call (required for coupling-from-the-past).
# BUG FIX: the loop body previously indexed seeds/times with j+1 instead of
# epoch+1, so the loop variable `epoch` was never used and the jth epoch's
# segment was replayed j+1 times rather than replaying epochs j, j-1, ..., 0.
function runepochs!(j, times, Y, seeds, lo, hi, Λ, adjlist, α, μ, n)
    for epoch = j:-1:0
        seed!(seeds[epoch+1])
        for t = times[epoch+1,1] : times[epoch+1,2]
            gibbsstep!(Y, lo, hi, Λ, adjlist, α, μ, n)
        end
    end
end
# Perfect sampling by CFTP, storing one RNG seed per "epoch" so past epochs
# can be replayed exactly. seeds[j+1] seeds epoch j = 0, 1, 2, ... going
# backwards in time from time zero: epoch 0 covers steps -T+1 to 0, and epoch
# j covers steps -(2^j)T+1 to -2^(j-1)T. Epoch count is capped at `maxepoch`
# (a module-level constant). Replicates that fail to coalesce are recorded as
# NaN columns (average=false) or excluded from the average (average=true).
function cftp_reuse_seeds(lo::Float64, hi::Float64,
    Λ::SparseMatrixCSC{Float64,Int}, adjlist::Array{Array{Int64,1},1},
    α::Vector{Float64}, μ::Vector{Float64}, n::Int, k::Int,
    average::Bool, verbose::Bool)
    temp = average ? zeros(Float64, n) : zeros(Float64, n, k)
    T = 2 #-Initial number of time steps to go back.
    seeds = [UInt32[1] for i = 1:maxepoch+1]
    times = [-T * 2 .^(0:maxepoch) .+ 1 [0; -T * 2 .^(0:maxepoch-1)]]
    L = zeros(n)
    H = zeros(n)
    goodcount = k
    for rep = 1:k
        # Fresh seeds for every replicate.
        seeds .= [[rand(UInt32)] for i = 1:maxepoch+1]
        coalesce = false
        j = 0
        while !coalesce && j <= maxepoch
            # Run lower (all-lo) and upper (all-hi) chains with identical
            # randomness; coalescence means the time-zero state is exact.
            fill!(L, lo)
            fill!(H, hi)
            runepochs!(j, times, L, seeds, lo, hi, Λ, adjlist, α, μ, n)
            runepochs!(j, times, H, seeds, lo, hi, Λ, adjlist, α, μ, n)
            coalesce = L==H
            if verbose
                println("Started from -$(times[j+1,1]): $(sum(H .!= L)) elements different.")
            end
            j = j + 1
        end
        if !coalesce
            @warn "Sampler did not coalesce in replicate $(rep)."
            L .= fill(NaN, n)
            goodcount -= 1
        end
        # BUG FIX: previously, when average=true and the replicate failed to
        # coalesce, the else-branch indexed the length-n vector temp as
        # temp[i,rep] (a BoundsError for rep > 1, and NaN pollution of the
        # average for rep == 1). Failed replicates are now simply excluded
        # from the running sum; goodcount already accounts for them.
        if average
            if coalesce
                for i in 1:n
                    temp[i] = temp[i] + L[i]
                end
            end
        else
            for i in 1:n
                temp[i,rep] = L[i]
            end
        end
        if verbose
            println("finished draw $(rep) of $(k)")
        end
    end
    if average
        # Fraction of successful draws at hi, using goodcount as denominator.
        return map(x -> (x - goodcount*lo)/(goodcount*(hi-lo)), temp)
    else
        return temp
    end
end
# CFTP variant that stores the uniform random variates themselves (matrix U)
# instead of RNG seeds. A lower chain (started all-lo) and an upper chain
# (started all-hi) are driven by the same uniforms; when they agree at time
# zero, that common state is returned as the draw.
function cftp_reuse_samples(lo::Float64, hi::Float64,
    Λ::SparseMatrixCSC{Float64,Int}, adjlist::Array{Array{Int64,1},1},
    α::Vector{Float64}, μ::Vector{Float64}, n::Int, k::Int,
    average::Bool, verbose::Bool)
    temp = average ? zeros(Float64, n) : zeros(Float64, n, k)
    L = zeros(n)
    H = zeros(n)
    ns = 0.0
    p_i = 0.0
    # We use matrix U to hold all the uniform random numbers needed to compute the
    # lower and upper chains as stochastic recursive sequences. In this matrix each column
    # holds the n random variates needed to do a full Gibbs sampling update of the
    # variables in the graph. We think of the columns as going backwards in time to the
    # right: column one is time 0, column 2 is time -1, ... column T+1 is time -T. So to
    # run the chains in forward time we go from right to left.
    for rep = 1:k
        T = 2 #-T tracks how far back in time we start. Our sample is from time 0.
        U = rand(n,1) #-Holds needed random numbers (this matrix will grow)
        coalesce = false
        while ~coalesce
            fill!(L, lo)
            fill!(H, hi)
            U = [U rand(n,T)]
            for t = T+1:-1:1 #-Column t corresponds to time -(t-1).
                for i = 1:n
                    # The lower chain
                    ns = nbrsum(Λ, L, μ, i, adjlist[i])
                    p_i = condprob(α, ns, lo, hi, i)
                    if U[i,t] < p_i
                        L[i] = hi
                    else
                        L[i] = lo
                    end
                    # The upper chain
                    ns = nbrsum(Λ, H, μ, i, adjlist[i])
                    p_i = condprob(α, ns, lo, hi, i)
                    if U[i,t] < p_i
                        H[i] = hi
                    else
                        H[i] = lo
                    end
                end
            end
            coalesce = L==H
            if verbose
                println("Started from -$(T): $(sum(H .!= L)) elements different.")
            end
            T = 2*T
        end
        if average
            for i in 1:n
                temp[i] = temp[i] + L[i]
            end
        else
            for i in 1:n
                temp[i,rep] = L[i]
            end
        end
        if verbose
            println("finished draw $(rep) of $(k)")
        end
    end
    if average
        # Convert the running sum into the fraction of draws at hi.
        return map(x -> (x - k*lo)/(k*(hi-lo)), temp)
    else
        return temp
    end
end
# Read-once CFTP: run forward in fixed-size blocks of Gibbs updates. In each
# block the main chain Y and bracketing chains L (all-lo) and H (all-hi) are
# driven by the same uniforms U; a draw (the state oldY entering a coalesced
# block) is recorded only when L and H agree at the block's end. The first
# recorded draw (rep == 0) is discarded. Block length is chosen by
# blocksize_estimate().
function cftp_read_once(lo::Float64, hi::Float64,
    Λ::SparseMatrixCSC{Float64,Int}, adjlist::Array{Array{Int64,1},1},
    α::Vector{Float64}, μ::Vector{Float64}, n::Int, k::Int,
    average::Bool, verbose::Bool)
    blocksize = blocksize_estimate(lo, hi, Λ, adjlist, α, μ, n)
    temp = average ? zeros(Float64, n) : zeros(Float64, n, k)
    L = zeros(n)
    H = zeros(n)
    Y = rand([lo, hi], n)
    U = zeros(n, blocksize)
    oldY = zeros(n)
    for rep = 0:k #-Run from zero because 1st draw is discarded.
        coalesce = false
        while ~coalesce
            copyto!(oldY, Y)
            fill!(L, lo)
            fill!(H, hi)
            for i = 1:n #-Performance: assigning to U in a loop uses 0 allocations.
                for j = 1:blocksize
                    U[i,j] = rand()
                end
            end
            gibbsstep_block!(Y, U, lo, hi, Λ, adjlist, α, μ)
            gibbsstep_block!(L, U, lo, hi, Λ, adjlist, α, μ)
            gibbsstep_block!(H, U, lo, hi, Λ, adjlist, α, μ)
            coalesce = L==H
            if verbose
                println("Sample $(rep) coalesced? $(coalesce).")
            end
        end
        if rep > 0
            if average
                for i in 1:n
                    temp[i] = temp[i] + oldY[i]
                end
            else
                for i in 1:n
                    temp[i,rep] = oldY[i]
                end
            end
        end
    end
    if average
        # Convert the running sum into the fraction of draws at hi.
        return map(x -> (x - k*lo)/(k*(hi-lo)), temp)
    else
        return temp
    end
end
# Run size(U, 2) full Gibbs sweeps of Z in-place, drawing the uniforms from
# the pre-generated matrix U (one column per sweep) instead of calling
# rand(). This lets coupled chains replay the exact same randomness.
function gibbsstep_block!(Z, U, lo, hi, Λ, adjlist, α, μ)
    n, T = size(U)
    for t = 1:T
        for i = 1:n
            ns = nbrsum(Λ, Z, μ, i, adjlist[i])
            p_i = condprob(α, ns, lo, hi, i)
            Z[i] = U[i,t] < p_i ? hi : lo
        end
    end
end
# Estimate the block size to use for read-once CFTP. Run `ntestchains` (a
# module-level constant) pairs of bracketing chains forward until they
# coalesce. Return a quantile of the sample of run lengths as the
# recommended block size.
function blocksize_estimate(lo, hi, Λ, adjlist, α, μ, n)
    coalesce_times = zeros(Int, ntestchains)
    L = zeros(n)
    H = zeros(n)
    U = zeros(n,1)
    for rep = 1:ntestchains
        coalesce = false
        fill!(L, lo)
        fill!(H, hi)
        count = one(Int)
        while ~coalesce
            # Performance note: for filling U with random numbers, could i) loop through U
            # and fill each element; ii) assign with rand(n,1) on the RHS; or iii) use
            # copyto!. Option i) uses no allocations, but empirically seems slower. Option
            # iii) seems fastest but requires allocation to produce the rand(n,1).
            copyto!(U, rand(n,1))
            gibbsstep_block!(L, U, lo, hi, Λ, adjlist, α, μ)
            gibbsstep_block!(H, U, lo, hi, Λ, adjlist, α, μ)
            coalesce = L==H
            count = count + 1
        end
        coalesce_times[rep] = count
    end
    return Int(round(quantile(coalesce_times, 0.6)))
end
# CFTP using a bounding chain. Instead of separate lower/upper chains, one
# chain BC tracks, per vertex, which labels are still possible (0 = {lo},
# 1 = {hi}, 2 = {lo, hi}); the draw is valid when no vertex remains in
# state 2. Like cftp_reuse_samples, the generated uniforms are saved in U
# and replayed from further back in time until coalescence.
function cftp_bounding_chain(lo::Float64, hi::Float64,
    Λ::SparseMatrixCSC{Float64,Int}, adjlist::Array{Array{Int64,1},1},
    α::Vector{Float64}, μ::Vector{Float64}, n::Int, k::Int,
    average::Bool, verbose::Bool)
    temp = average ? zeros(Float64, n) : zeros(Float64, n, k)
    BC = Vector{Int}(undef,n)
    # The algorithm is similar to cftp_reuse_samples in that we save the generated random
    # variates. But in this method compute probability bounds on each variable to update
    # a bounding chain until it coalesces.
    for rep = 1:k
        T = 2 #-T tracks how far back in time we start. Our sample is from time 0.
        U = rand(n,1) #-Holds needed random numbers (this matrix will grow)
        coalesce = false
        while ~coalesce
            # Initialize the bounding chain. The state of the chain is represented by a vector of
            # length n, where each element can take values in the set {0, 1, 2}, where:
            #   - 0 indicates that the single label "lo" is in the bounding chain for that vertex.
            #   - 1 indicates that the single label "hi" is in the bounding chain for that vertex.
            #   - 2 indicates that both labels {"lo", "hi"} are in the bounding chain.
            BC = fill(2,n)
            U = [U rand(n,T)]
            for t = T+1:-1:1 #-Column t corresponds to time -(t-1).
                for i = 1:n
                    # Compute the extreme possible neighbor sums given the
                    # label sets currently in the bounding chain.
                    ss = 0.0 #"as small as possible" neighbor sum
                    sl = 0.0 #"as large as possible" neighbor sum
                    for j in adjlist[i]
                        λij = Λ[i,j]
                        if BC[j] == 2
                            # Neighbor undetermined: the sign of λij decides
                            # which label minimizes/maximizes the sum.
                            if λij < 0
                                ss = ss + λij * (hi - μ[j])
                                sl = sl + λij * (lo - μ[j])
                            else
                                ss = ss + λij * (lo - μ[j])
                                sl = sl + λij * (hi - μ[j])
                            end
                        elseif BC[j] == 1
                            ss = ss + λij * (hi - μ[j])
                            sl = sl + λij * (hi - μ[j])
                        else
                            ss = ss + λij * (lo - μ[j])
                            sl = sl + λij * (lo - μ[j])
                        end
                    end
                    # Conditional probabilities of hi under the two extremes;
                    # compare both to the same uniform to update the label set.
                    pss = 1 / ( 1 + exp((lo-hi)*(α[i] + ss)) )
                    psl = 1 / ( 1 + exp((lo-hi)*(α[i] + sl)) )
                    check1 = U[i,t] < pss
                    check2 = U[i,t] < psl
                    if check1 && check2
                        BC[i] = 1
                    elseif check1 || check2
                        BC[i] = 2
                    else
                        BC[i] = 0
                    end
                end
            end
            coalesce = all(BC .!= 2)
            if verbose
                println("Started from -$(T): $(sum(BC .== 2)) elements not coalesced.")
            end
            T = 2*T
        end
        # Map the coalesced 0/1 states back to the coding values (lo, hi).
        vals = [lo, hi]
        if average
            for i in 1:n
                temp[i] = temp[i] + vals[BC[i] + 1]
            end
        else
            for i in 1:n
                temp[i,rep] = vals[BC[i] + 1]
            end
        end
        if verbose
            println("finished draw $(rep) of $(k)")
        end
    end
    if average
        # Convert the running sum into the fraction of draws at hi.
        return map(x -> (x - k*lo)/(k*(hi-lo)), temp)
    else
        return temp
    end
end
| Autologistic | https://github.com/kramsretlow/Autologistic.jl.git |
|
[
"MIT"
] | 0.5.1 | 15b96cd9e90eaa8d256266aed59fd13fd1c8f181 | code | 4566 | """
SimplePairwise
Pairwise association matrix, parametrized as a scalar parameter times the adjacency matrix.
# Constructors
SimplePairwise(G::SimpleGraph, count::Int=1)
SimplePairwise(n::Int, count::Int=1)
SimplePairwise(λ::Real, G::SimpleGraph)
SimplePairwise(λ::Real, G::SimpleGraph, count::Int)
If provide only a graph, set λ = 0. If provide only an integer, set λ = 0 and make a totally
disconnected graph. If provide a graph and a scalar, convert the scalar to a length-1
vector.
Every observation must have the same association matrix in this case.
So while we internally treat it like an n-by-n-by-m matrix, just return a 2D n-by-n matrix
to the user.
# Examples
```jldoctests
julia> g = makegrid4(2,2).G;
julia> λ = 1.0;
julia> p = SimplePairwise(λ, g, 4); #-4 observations
julia> size(p)
(4, 4, 4)
julia> Matrix(p[:,:,:])
4×4 Array{Float64,2}:
0.0 1.0 1.0 0.0
1.0 0.0 0.0 1.0
1.0 0.0 0.0 1.0
0.0 1.0 1.0 0.0
```
"""
mutable struct SimplePairwise <: AbstractPairwiseParameter
    λ::Vector{Float64}
    G::SimpleGraph{Int}
    count::Int
    A::SparseMatrixCSC{Float64,Int64}
    # Inner constructor: λ is a length-1 vector holding the single association
    # parameter; the graph's adjacency matrix is cached in A.
    function SimplePairwise(lam, g, m)
        length(lam) == 1 || error("SimplePairwise: λ must have length 1")
        m >= 1 || error("SimplePairwise: count must be positive")
        return new(lam, g, m, adjacency_matrix(g, Float64))
    end
end
# Constructors
# - If provide only a graph, set λ = 0.
# - If provide only an integer, set λ = 0 and make a totally disconnected graph.
# - If provide a graph and a scalar, convert the scalar to a length-1 vector.
SimplePairwise(G::SimpleGraph, count::Int=1) = SimplePairwise([0.0], G, count)
SimplePairwise(n::Int, count::Int=1) = SimplePairwise(0, SimpleGraph(n), count)
SimplePairwise(λ::Real, G::SimpleGraph) = SimplePairwise([(Float64)(λ)], G, 1)
SimplePairwise(λ::Real, G::SimpleGraph, count::Int) = SimplePairwise([(Float64)(λ)], G, count)
#---- AbstractArray methods ---- (following sparsematrix.jl)
# getindex - implementations
# The association matrix is λ[1] times the cached adjacency matrix A, so
# every access scales the corresponding entry (or block) of A.
getindex(p::SimplePairwise, i::Int, j::Int) = p.λ[1] * p.A[i, j]
getindex(p::SimplePairwise, i::Int) = p.λ[1] * p.A[i]
getindex(p::SimplePairwise, ::Colon, ::Colon) = p.λ[1] * p.A
getindex(p::SimplePairwise, I::AbstractArray) = p.λ[1] * p.A[I]
getindex(p::SimplePairwise, I::AbstractVector, J::AbstractVector) = p.λ[1] * p.A[I,J]
# getindex - translations
# All observations share the same matrix, so any third ("replicate") index r
# is ignored; Bool masks are converted to index vectors with findall.
getindex(p::SimplePairwise, I::Tuple{Integer, Integer}) = p[I[1], I[2]]
getindex(p::SimplePairwise, I::Tuple{Integer, Integer, Integer}) = p[I[1], I[2]]
getindex(p::SimplePairwise, i::Int, j::Int, r::Int) = p[i,j]
getindex(p::SimplePairwise, ::Colon, ::Colon, ::Colon) = p[:,:]
getindex(p::SimplePairwise, ::Colon, ::Colon, r::Int) = p[:,:]
getindex(p::SimplePairwise, ::Colon, j) = p[1:size(p.A,1), j]
getindex(p::SimplePairwise, i, ::Colon) = p[i, 1:size(p.A,2)]
getindex(p::SimplePairwise, ::Colon, j, r) = p[:,j]
getindex(p::SimplePairwise, i, ::Colon, r) = p[i,:]
getindex(p::SimplePairwise, I::AbstractVector{Bool}, J::AbstractRange{<:Integer}) = p[findall(I),J]
getindex(p::SimplePairwise, I::AbstractRange{<:Integer}, J::AbstractVector{Bool}) = p[I,findall(J)]
getindex(p::SimplePairwise, I::Integer, J::AbstractVector{Bool}) = p[I,findall(J)]
getindex(p::SimplePairwise, I::AbstractVector{Bool}, J::Integer) = p[findall(I),J]
getindex(p::SimplePairwise, I::AbstractVector{Bool}, J::AbstractVector{Bool}) = p[findall(I),findall(J)]
getindex(p::SimplePairwise, I::AbstractVector{<:Integer}, J::AbstractVector{Bool}) = p[I,findall(J)]
getindex(p::SimplePairwise, I::AbstractVector{Bool}, J::AbstractVector{<:Integer}) = p[findall(I),J]
# setindex! - element-wise mutation is disallowed (values are derived from
# λ[1] * A); all forms raise and direct users to setparameters! instead.
setindex!(p::SimplePairwise, i::Int, j::Int) =
    error("Pairwise values cannot be set directly. Use setparameters! instead.")
setindex!(p::SimplePairwise, i::Int, j::Int, k::Int) =
    error("Pairwise values cannot be set directly. Use setparameters! instead.")
setindex!(p::SimplePairwise, i::Int) =
    error("Pairwise values cannot be set directly. Use setparameters! instead.")
#---- AbstractPairwiseParameter interface methods ----
# The single association parameter, as a length-1 vector.
getparameters(p::SimplePairwise) = p.λ
function setparameters!(p::SimplePairwise, newpar::Vector{Float64})
    # Replaces the parameter vector (expected length 1; not checked here).
    p.λ = newpar
end
#---- to be used in show methods ----
# Return an indented, one-line-per-field summary of the object.
function showfields(p::SimplePairwise, leadspaces=0)
    spc = repeat(" ", leadspaces)
    return spc * "λ $(p.λ) (association parameter)\n" *
           spc * "G the graph ($(nv(p.G)) vertices, $(ne(p.G)) edges)\n" *
           spc * "count $(p.count) (the number of observations)\n" *
           spc * "A $(size2string(p.A)) SparseMatrixCSC (the adjacency matrix)\n"
end
| Autologistic | https://github.com/kramsretlow/Autologistic.jl.git |
|
[
"MIT"
] | 0.5.1 | 15b96cd9e90eaa8d256266aed59fd13fd1c8f181 | code | 16723 | using Test
using Graphs, LinearAlgebra
using Autologistic
println("Running tests:")
# Exercises FullUnary construction from a vector, a matrix, and dimensions,
# plus indexing, size, and parameter get/set round-trips.
@testset "FullUnary constructors and interfaces" begin
    M = [1.1 4.4 7.7
         2.2 5.5 8.8
         3.3 4.4 9.9]
    u1 = FullUnary(M[:,1])
    u2 = FullUnary(M)
    @test sprint(io -> show(io,u1)) == "3×1 FullUnary"
    @test u1[2] == 2.2
    @test u2[2,3] == 8.8
    @test size(u1) == (3,1)
    @test size(u2) == (3,3)
    @test getparameters(u1) == [1.1; 2.2; 3.3]
    setparameters!(u1, [0.1, 0.2, 0.3])
    setparameters!(u2, [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
    u1[2] = 2.22
    u2[2,3] = 8.88
    u3 = FullUnary(10)
    u4 = FullUnary(10,4)
    @test size(u3) == (10,1)
    @test size(u4) == (10,4)
end
# Exercises all LinPredUnary constructors and verifies indexing returns the
# appropriate X*β values, plus parameter get/set and dimension checking.
@testset "LinPredUnary constructors and interfaces" begin
    X1 = [1.0 2.0 3.0
          1.0 4.0 5.0
          1.0 6.0 7.0
          1.0 8.0 9.0]
    X = cat(X1, 2*X1, dims=3)
    beta = [1.0, 2.0, 3.0]
    u1 = LinPredUnary(X, beta)
    u2 = LinPredUnary(X1, beta)
    u3 = LinPredUnary(X1)
    u4 = LinPredUnary(X)
    u5 = LinPredUnary(4, 3)
    u6 = LinPredUnary(4, 3, 2)
    Xbeta = [14.0 28.0
             24.0 48.0
             34.0 68.0
             44.0 88.0]
    # X1beta is currently unused by the assertions below.
    X1beta = reshape(Xbeta[:,1], (4,1))
    @test size(u1) == size(u4) == size(u6) == (4,2)
    @test size(u2) == size(u3) == size(u5) == (4,1)
    @test u1[3,2] == u1[7] == 68.0
    @test u1[[1 2; 2 2]] == [14.0 24.0; 24.0 24.0]
    @test getparameters(u1) == beta
    setparameters!(u1, [2.0, 3.0, 4.0])
    @test getparameters(u1) == [2.0, 3.0, 4.0]
    @test_throws Exception LinPredUnary(X, [1,2])
end
# Exercises all SimplePairwise constructors, indexing, parameter get/set,
# and argument validation.
@testset "SimplePairwise constructors and interfaces" begin
    n = 10
    m = 3
    λ = 1.0
    G = Graph(n, Int(floor(n*(n-1)/4)))
    p1 = SimplePairwise([λ], G, m)
    p2 = SimplePairwise(G)
    p3 = SimplePairwise(G, m)
    p4 = SimplePairwise(n)
    p5 = SimplePairwise(n, m)
    p6 = SimplePairwise(λ, G)
    p7 = SimplePairwise(λ, G, m)
    @test sprint(io -> show(io,p1)) == "10×10×3 SimplePairwise"
    # BUG FIX: previously this was `@test any(i -> (i !== (n,n,m)), sizes)`,
    # which passed whenever ANY size differed from (n,n,m) - i.e. it asserted
    # the presence of a "wrong" size rather than checking each object. Check
    # each constructor's expected size (count defaults to 1 where omitted).
    @test [size(p) for p in [p1, p2, p3, p4, p5, p6, p7]] ==
          [(n,n,m), (n,n,1), (n,n,m), (n,n,1), (n,n,m), (n,n,1), (n,n,m)]
    @test p1[2,2,2] == p1[2,2] == λ*adjacency_matrix(G,Float64)[2,2]
    setparameters!(p1, [2.0])
    @test getparameters(p1) == [2.0]
    @test_throws Exception SimplePairwise([1,2],G,m)
    @test_throws Exception SimplePairwise([1],G,0)
end
# Exercises all FullPairwise constructors, verifies the sparsity pattern of Λ
# matches the graph, and round-trips the edge-ordered parameter vector.
@testset "FullPairwise constructors and interfaces" begin
    nvert = 10
    nedge = 20
    m = 3
    G = Graph(nvert, nedge)
    λ = rand(-1.1:0.2:1.1, nedge)
    p1 = FullPairwise(λ, G, m)
    p2 = FullPairwise(G)
    p3 = FullPairwise(G, m)
    p4 = FullPairwise(nvert)
    p5 = FullPairwise(nvert, m)
    p6 = FullPairwise(λ, G)
    p7 = FullPairwise(λ, G, m)
    # BUG FIX: previously `@test any(i -> (i !== (nvert,nvert,m)), sizes)`,
    # which passed whenever ANY size differed from the target instead of
    # checking each object's size (count defaults to 1 where omitted).
    @test [size(p) for p in [p1, p2, p3, p4, p5, p6, p7]] ==
          [(nvert,nvert,m), (nvert,nvert,1), (nvert,nvert,m), (nvert,nvert,1),
           (nvert,nvert,m), (nvert,nvert,1), (nvert,nvert,m)]
    @test (adjacency_matrix(G) .!= 0) == (p1.Λ .!= 0)
    @test p1[2,2,2] == p1[2,2]
    newpar = rand(-1.1:0.2:1.1, nedge)
    setparameters!(p1, newpar)
    @test (adjacency_matrix(G) .!= 0) == (p1.Λ .!= 0)
    # Collect upper-triangle nonzeros of Λ in lexicographic order; they must
    # equal the edge-ordered parameter vector. (Typed Float64[] rather than
    # the untyped [] used previously.)
    fromΛ = Float64[]
    for i = 1:nvert
        for j = i+1:nvert
            if p1.Λ[i,j] != 0
                push!(fromΛ, p1.Λ[i,j])
            end
        end
    end
    @test newpar == p1.λ == fromΛ == getparameters(p1)
end
# Exercises ALRsimple construction (explicit, from components, and from a
# graph with keywords) and parameter get/set for r = 1 and 5 observations.
@testset "ALRsimple constructors" begin
    for r in [1 5]
        (n, p, m) = (100, 4, r)
        X = rand(n,p,m)
        β = [1.0, 2.0, 3.0, 4.0]
        Y = makebool(round.(rand(n,m)), (0.0, 1.0))
        unary = LinPredUnary(X, β)
        pairwise = SimplePairwise(n, m)
        coords = [(rand(),rand()) for i=1:n]
        m1 = ALRsimple(Y, unary, pairwise, none, (-1.0,1.0), ("low","high"), coords)
        m2 = ALRsimple(unary, pairwise)
        m3 = ALRsimple(Graph(n, Int(floor(n*(n-1)/4))), X, Y=Y, β=β, λ = 1.0)
        @test getparameters(m3) == [β; 1.0]
        @test getunaryparameters(m3) == β
        @test getpairwiseparameters(m3) == [1.0]
        setparameters!(m1, [1.1, 2.2, 3.3, 4.4, -1.0])
        setunaryparameters!(m2, [1.1, 2.2, 3.3, 4.4])
        setpairwiseparameters!(m2, [-1.0])
        @test getparameters(m1) == getparameters(m2) == [1.1, 2.2, 3.3, 4.4, -1.0]
    end
end
# Exercises ALsimple construction (explicit, from components, and from a
# graph) and parameter get/set for r = 1 and 5 observations.
@testset "ALsimple constructors" begin
    for r in [1 5]
        (n, m) = (100, r)
        alpha = rand(n, m)
        Y = makebool(round.(rand(n, m)), (0.0, 1.0))
        unary = FullUnary(alpha)
        pairwise = SimplePairwise(n, m)
        coords = [(rand(), rand()) for i=1:n]
        G = Graph(n, Int(floor(n*(n-1)/4)))
        m1 = ALsimple(Y, unary, pairwise, none, (-1.0,1.0), ("low","high"), coords)
        m2 = ALsimple(unary, pairwise)
        m3 = ALsimple(G, alpha, λ=1.0)
        m4 = ALsimple(G, m)
        @test getparameters(m1) == [alpha[:]; 0.0]
        @test getparameters(m2) == [alpha[:]; 0.0]
        @test getparameters(m3) == [alpha[:]; 1.0]
        @test size(m4.unary) == (n, m)
        @test getunaryparameters(m1) == alpha[:]
        @test getpairwiseparameters(m3) == [1.0]
        setunaryparameters!(m3, 2*alpha[:])
        setpairwiseparameters!(m3, [2.0])
        setparameters!(m4, [2*alpha[:]; 2.0])
        @test getparameters(m3) == getparameters(m4) == [2*alpha[:]; 2.0]
    end
end
# Exercises the three ALfull constructors, checks they produce identical
# models, and round-trips the concatenated [α; λ] parameter vector.
@testset "ALfull constructors" begin
    g = Graph(10, 20);
    alpha = zeros(10, 4);
    lambda = rand(20);
    Y = rand([0, 1], 10, 4);
    u = FullUnary(alpha);
    p = FullPairwise(g, 4);
    setparameters!(p, lambda)
    model1 = ALfull(u, p, Y=Y);
    model2 = ALfull(g, alpha, lambda, Y=Y);
    model3 = ALfull(g, 4, Y=Y);
    setparameters!(model3, [alpha[:]; lambda])
    @test all([getfield(model1, fn)==getfield(model2, fn)==getfield(model3, fn)
               for fn in fieldnames(ALfull)])
    @test getparameters(model1) == [alpha[:]; lambda]
    @test getunaryparameters(model1) == alpha[:]
    @test getpairwiseparameters(model1) == lambda
    @test size(model2.unary) == (10, 4)
    @test size(model2.pairwise) == (10, 10, 4)
    setparameters!(model3, [2 .* alpha[:]; 2 .* lambda])
    setunaryparameters!(model2, 2 .* alpha[:])
    setpairwiseparameters!(model2, 2 .* lambda)
    @test getparameters(model3) ≈ getparameters(model2) ≈ [2*alpha[:]; 2*lambda]
end
# Exercises the package helper functions: makebool, makecoded, the grid and
# spatial graph builders, and the numeric Hessian helper.
@testset "Helper functions" begin
    # --- makebool() ---
    y1 = [false, false, true]
    y2 = [1 2; 1 2]
    y3 = [1.0 2.0; 1.0 2.0]
    y4 = ["yes", "no", "no"]
    y5 = ones(10,3)
    @test makebool(y1) == reshape([false, false, true], (3,1))
    @test makebool(y2) == makebool(y3) == [false true; false true]
    @test makebool(y4) == reshape([true, false, false], (3,1))
    @test makebool(y5, (0,1)) == fill(true, 10, 3)
    @test makebool(y5, (1,2)) == fill(false, 10, 3)
    # --- makecoded() ---
    M1 = ALRsimple(Graph(4,3), rand(4,2), Y=[true, false, false, true], coding=(-1,1))
    @test makecoded(M1) == reshape([1, -1, -1, 1], (4,1))
    @test makecoded([true, false, false, true], M1.coding) == reshape([1, -1, -1, 1], (4,1))
    # --- makegrid4() and makegrid8() ---
    out4 = makegrid4(11, 21, (-1,1), (-10,10))
    @test out4.locs[11*10 + 6] == (0.0, 0.0)
    @test nv(out4.G) == 11*21
    @test ne(out4.G) == 11*20 + 21*10
    out8 = makegrid8(11, 21, (-1,1), (-10,10))
    @test out8.locs[11*10 + 6] == (0.0, 0.0)
    @test nv(out8.G) == 11*21
    @test ne(out8.G) == 11*20 + 21*10 + 2*20*10
    # --- makespatialgraph() ---
    coords = [(Float64(i), Float64(j)) for i = 1:5 for j = 1:5]
    out = makespatialgraph(coords, sqrt(2))
    @test ne(out.G) == 2*4*5 + 2*4*4
    # --- hess() ---
    fcn(x) = x[1]^2 + 2x[2]^2 + x[1]*x[2]
    H = Autologistic.hess(fcn, [1, 1])
    @test isapprox(H, [[2; 1] [1; 4]], atol=0.001)
end
@testset "almodel_functions" begin
# --- centeringterms() ---
M1 = ALRsimple(Graph(4,3), rand(4,2), Y=[true, false, false, true], coding=(-1,1))
@test centeringterms(M1) == zeros(4,1)
@test centeringterms(M1, onehalf) == ones(4,1)./2
@test centeringterms(M1, expectation) == zeros(4,1)
M2 = ALRsimple(makegrid4(2,2)[1], ones(4,2,3), β = [1.0, 1.0], centering = expectation,
coding = (0,1), Y = repeat([true, true, false, false],1,3))
@test centeringterms(M2) ≈ ℯ^2/(1+ℯ^2) .* ones(4,3)
# --- negpotential(), loglikelihood(), and negloglik!---
setpairwiseparameters!(M2, [1.0])
@test negpotential(M2) ≈ 1.4768116880884703 * ones(3,1)
@test loglikelihood(M2) ≈ -11.86986109487605
@test Autologistic.negloglik!([1.0, 1.0, 1.0], M2) ≈ 11.86986109487605
M = ALsimple(makegrid4(3,3).G, ones(9))
f = fullPMF(M)
@test exp(negpotential(M)[1])/f.partition ≈ exp(loglikelihood(M))
# --- pseudolikelihood() ---
X = [1.1 2.2
1.0 2.0
2.1 1.2
3.0 0.3]
Y = [0; 0; 1; 0]
M3 = ALRsimple(makegrid4(2,2)[1], cat(X,X,dims=3), Y=cat(Y,Y,dims=2),
β=[-0.5, 1.5], λ=1.25, centering=expectation)
@test pseudolikelihood(M3) ≈ 12.333549445795818
# --- fullPMF() ---
M4 = ALRsimple(Graph(3,0), reshape([-1. 0. 1. -1. 0. 1.],(3,1,2)), β=[1.0])
pmf = fullPMF(M4)
probs = [0.0524968; 0.387902; 0.0524968; 0.387902; 0.00710467; 0.0524968;
0.00710467; 0.0524968]
@test pmf.partition ≈ 19.04878276433453 * ones(2)
@test pmf.table[:,4,1] == pmf.table[:,4,2]
@test isapprox(pmf.table[:,4,1], probs, atol=1e-6)
# --- marginalprobabilities() ---
truemp = [0.1192029 0.1192029; 0.5 0.5; 0.8807971 0.8807971]
@test isapprox(marginalprobabilities(M4), truemp, atol=1e-6)
@test isapprox(marginalprobabilities(M4,indices=2), truemp[:,2], atol=1e-6)
# --- conditionalprobabilities() ---
lam = 0.5
a, b, c = (-1.2, 0.25, 1.5)
y1, y2, y3 = (-1.0, 1.0, 1.0)
ns1, ns2, ns3 = lam .* (y2+y3, y1+y3, y1+y2)
cp1 = exp(a+ns1) / (exp(-(a+ns1)) + exp(a+ns1))
cp2 = exp(b+ns2) / (exp(-(b+ns2)) + exp(b+ns2))
cp3 = exp(c+ns3) / (exp(-(c+ns3)) + exp(c+ns3))
M = ALsimple(FullUnary([a, b, c]), SimplePairwise(lam, Graph(3,3)), Y=[y1,y2,y3])
@test isapprox(conditionalprobabilities(M), [cp1, cp2, cp3])
@test isapprox(conditionalprobabilities(M, vertices=[1,3]), [cp1, cp3])
Y = [ones(9) zeros(9)]
model = ALsimple(makegrid4(3,3).G, ones(9,2), Y=Y, λ=0.5)
@test isapprox(conditionalprobabilities(model, vertices=5), [0.997527377 0.119202922])
@test isapprox(conditionalprobabilities(model, indices=2),
[0.5; 0.26894142; 0.5; 0.2689414213; 0.119202922; 0.26894142; 0.5; 0.26894142; 0.5])
end
@testset "samplers" begin
M5 = ALRsimple(makegrid4(4,4)[1], rand(16,1))
out1 = sample(M5, 10000, average=false)
@test all(x->isapprox(x,0.5,atol=0.05), sum(out1.==1, dims=2)/10000)
out2 = sample(M5, 10000, average=true, burnin=100, config=rand([1,2], 16))
@test all(x->isapprox(x,0.5,atol=0.05), out2)
M6 = ALRsimple(makegrid4(3,3)[1], rand(9,2))
setparameters!(M6, [-0.5, 0.5, 0.2])
marg = marginalprobabilities(M6)
out3 = sample(M6, 10000, method=perfect_read_once, average=true)
out4 = sample(M6, 10000, method=perfect_reuse_samples, average=true)
out5 = sample(M6, 10000, method=perfect_reuse_seeds, average=true)
out6 = sample(M6, 10000, method=perfect_bounding_chain, average=true)
@test isapprox(out3, marg, atol=0.04, norm=x->norm(x,Inf))
@test isapprox(out4, marg, atol=0.04, norm=x->norm(x,Inf))
@test isapprox(out5, marg, atol=0.04, norm=x->norm(x,Inf))
@test isapprox(out6, marg, atol=0.04, norm=x->norm(x,Inf))
tbl = fullPMF(M6).table
checkthree(x) = all(x[1:3] .== -1.0)
threelow = sum(mapslices(x -> checkthree(x) ? x[10] : 0.0, tbl, dims=2))
out7 = sample(M6, 10000, method=perfect_read_once, average=false)
est7 = sum(mapslices(x -> checkthree(x) ? 1.0/10000.0 : 0.0, out7, dims=1))
out8 = sample(M6, 10000, method=perfect_reuse_samples, average=false)
est8 = sum(mapslices(x -> checkthree(x) ? 1.0/10000.0 : 0.0, out8, dims=1))
out9 = sample(M6, 10000, method=perfect_reuse_seeds, average=false)
est9 = sum(mapslices(x -> checkthree(x) ? 1.0/10000.0 : 0.0, out9, dims=1))
out10 = sample(M6, 10000, method=perfect_bounding_chain, average=false)
est10 = sum(mapslices(x -> checkthree(x) ? 1.0/10000.0 : 0.0, out10, dims=1))
@test isapprox(est7, threelow, atol=0.03)
@test isapprox(est8, threelow, atol=0.03)
@test isapprox(est9, threelow, atol=0.03)
@test isapprox(est10, threelow, atol=0.03)
nobs = 4
M7 = ALRsimple(makegrid8(3,3)[1], rand(9,2,nobs))
setparameters!(M7, [-0.5, 0.5, 0.2])
out11 = sample(M7, 100)
@test size(out11) == (9, nobs, 100)
out12 = sample(M7, 100, average=true)
@test size(out12) == (9, nobs)
out13 = sample(M7, 10, indices=1:2)
@test size(out13) == (9, 2, 10)
out14 = sample(M7, 1, indices=nobs)
@test size(out14) == (9,)
out15 = sample(M7, 1, indices=1:3)
@test size(out15) == (9, 3)
marg = marginalprobabilities(M7)
out16 = sample(M7, 10000, method=perfect_read_once, average=true)
@test isapprox(out16[:], marg[:], atol=0.03, norm=x->norm(x,Inf))
M8 = ALsimple(complete_graph(10), zeros(10))
samp = sample(M8, 10000, method=Gibbs, skip=2, average=true)
@test isapprox(samp, fill(0.5, 10), atol=0.03, norm=x->norm(x,Inf))
end
@testset "ML and PL Estimation" begin
G = makegrid4(4,3).G
model = ALRsimple(G, ones(12,1), Y=[fill(-1,4); fill(1,8)])
mle = fit_ml!(model)
@test isapprox(mle.estimate, [0.07915; 0.4249], atol=0.001)
mle2 = fit_ml!(model, start=[0.07; 0.4], verbose=true, iterations=30, show_trace=true)
@test isapprox(mle2.estimate, [0.07915; 0.4249], atol=0.001)
@test isapprox(mle2.pvalues, [0.6279; 0.0511], atol=0.001)
mleERR = fit_ml!(model, start=[1000, 1000])
@test typeof(mleERR.optim) <: Exception
tup1, tup2 = Autologistic.splitkw((method=Gibbs, iterations=1000, average=true,
show_trace=false))
@test tup1 == (show_trace = false, iterations = 1000)
@test tup2 == (method = Gibbs, average = true)
oldY = model.responses
oldpar = getparameters(model)
theboot = oneboot(model, method=Gibbs)
@test model.responses == oldY
@test getparameters(model) == oldpar
theboot2 = oneboot(model, [0.1,0.02])
@test getparameters(model) == oldpar
@test keys(theboot) == keys(theboot2) == (:sample, :estimate, :convergence)
@test map(x -> size(x), values(theboot)) == ((12,), (2,), ())
Y=[[fill(-1,4); fill(1,8)] [fill(-1,3); fill(1,9)] [fill(-1,5); fill(1,7)]]
model2 = ALRsimple(G, ones(12,1,3), Y=Y)
fit = fit_pl!(model2, start=[-0.4, 1.1])
@test isapprox(fit.estimate, [-0.390104; 1.10103], atol=0.001)
fitERR = fit_pl!(model2, start=[1000, 1000])
@test typeof(fitERR.optim) <: Exception
boots1 = [oneboot(model2, start=[-0.4, 1.1]) for i = 1:10]
samps = zeros(12,3,10)
ests = zeros(2,10)
convs = fill(false, 10)
for i = 1:10
samps[:,:,i] = boots1[i].sample
ests[:,i] = boots1[i].estimate
convs[i] = boots1[i].convergence
end
addboot!(fit, boots1)
addboot!(fit, samps, ests, convs)
@test size(fit.bootsamples) == (12,3,20)
@test length(fit.convergence) == 20
@test fit.bootestimates[:,1:10] == fit.bootestimates[:,11:20]
G3 = makegrid4(7,7)
model3 = ALRsimple(G3.G, [-ones(15,1); ones(34,1)])
Y = ones(49)
Y[[3, 5, 7, 12, 14, 17, 18, 22, 23, 24, 25, 27, 30, 31, 34, 35, 36, 37, 40, 43,
44, 45, 46, 48, 49]] .= -1.0
model3.responses = makebool(Y)
fit = fit_pl!(model3, start=[-0.25, -0.06], nboot=100)
@test isapprox(fit.estimate, [-0.26976, -0.06015], atol=0.001)
end
@testset "ALfit type" begin
tst = ALfit()
@test Autologistic.showfields(tst) == "(all fields empty)\n"
tst.estimate = rand(10)
@test Autologistic.showfields(tst) ==
"estimate 10-element vector of parameter estimates\n"
sm = ["yes" "no" "maybe";
"1.2345" "123.45" "12345";
"12.345" "1.2345" "12345"]
Autologistic.align!(sm, 1, '.')
Autologistic.align!(sm, 2, '.')
Autologistic.align!(sm, 3, 'x')
@test sm[:,1] == ["yes"; " 1.2345"; "12.345"]
@test sm[:,2] == ["no"; "123.45"; " 1.2345"]
@test sm[:,3] == ["maybe"; "12345"; "12345"]
end | Autologistic | https://github.com/kramsretlow/Autologistic.jl.git |
|
[
"MIT"
] | 0.5.1 | 15b96cd9e90eaa8d256266aed59fd13fd1c8f181 | docs | 2915 | # Autologistic
[](https://kramsretlow.github.io/Autologistic.jl/stable)
[](https://kramsretlow.github.io/Autologistic.jl/dev)
[](https://travis-ci.com/kramsretlow/Autologistic.jl)
[](https://codecov.io/gh/kramsretlow/Autologistic.jl)
A Julia package for computing with the autologistic (Ising) probability model
and performing autologistic regression.
Autologistic regression is like an extension of logistic regression that allows the binary
responses to be correlated. An undirected graph is used to encode the association structure
among the responses.
The package follows the treatment of this model given in the paper
[Better Autologistic Regression](https://doi.org/10.3389/fams.2017.00024). As described in
that paper, different variants of "the" autologistic regression model are actually different
probability models. One reason this package was created was to allow researchers to compare
the performance of the different model variants. You can create different variants of
the model easily and fit them using either maximum likelihood (for small-n cases) or maximum
pseudolikelihood (for large-n cases).
At present only the most common "simple" form of the model--with a single parameter
controlling the association strength everywhere in graph--is implemented. But the
package is designed to be extensible. In future different ways of parametrizing
the association could be added.
Much more detail is provided in the [documentation](https://kramsretlow.github.io/Autologistic.jl/stable).
**NOTE:** As of `v0.5.0`, `Autologistic.jl` uses `Graphs.jl` to represent its graphs. Prior versions
used the predecessor package `LightGraphs.jl`. You may need to update earlier code if you were supplying
graphs into autologistic types.
```julia
# To get a feeling for the package facilities.
# The package uses Graphs.jl for graphs.
using Autologistic, Graphs
g = Graph(100, 400) #-Create a random graph (100 vertices, 400 edges)
X = [ones(100) rand(100,3)] #-A matrix of predictors.
Y = rand([0, 1], 100) #-A vector of binary responses.
model = ALRsimple(g, X, Y=Y) #-Create autologistic regression model
# Estimate parameters using pseudolikelihood. Do parametric bootstrap
# for error estimation. Draw bootstrap samples using perfect sampling.
fit = fit_pl!(model, nboot=2000, method=perfect_read_once)
# Draw samples from the fitted model and get the average to estimate
# the marginal probability distribution. Use a different perfect sampling
# algorithm.
marginal = sample(model, 1000, method=perfect_bounding_chain, average=true)
``` | Autologistic | https://github.com/kramsretlow/Autologistic.jl.git |
|
[
"MIT"
] | 0.5.1 | 15b96cd9e90eaa8d256266aed59fd13fd1c8f181 | docs | 7931 | # Background
This section provides a brief overview of the autologistic model, to establish some
conventions and terminology that will help you to make appropriate use of `Autologistic.jl`.
The package is concerned with the analysis of dichotomous data: categorical observations
that can take two possible values (low or high, alive or dead, present or absent, etc.).
It is common to refer to such data as binary, and to use numeric values 0 and 1 to
represent the two states. We do not have to use 0 and 1, however: any other pair of numbers
could be used instead. This might seem like a small detail, but for autologistic
regression models, the choice of numeric coding is very important. The pair of
values used to represent the two states is called the **coding**.
!!! note "Important Fact 1"
For ALR models, two otherwise-identical models that differ only in their coding will
generally **not** be equivalent probability models. Changing the coding fundamentally
changes the model. For [a variety of reasons](https://doi.org/10.3389/fams.2017.00024),
the ``(-1,1)`` coding is strongly recommended, and is used by default.
When responses are independent (conditional on the covariates), logistic regression is the
most common model. For cases where the independence assumption might not hold—responses are
correlated, even after including covariate effects—autologistic models are a useful
option. They revert to logistic regression models when their association parameters are
set to zero.
## The Autologistic (AL) Model
Let ``\mathbf{Y}`` be a vector of ``n`` dichotomous random variables, expressed using any
chosen coding. The AL model is a probability model for the joint distribution of the variables.
Under the AL model, the joint probability mass function
(PMF) of the random vector is:
```math
\Pr(\mathbf{Y}=\mathbf{y}) \propto \exp\left(\mathbf{y}^T\boldsymbol{\alpha} -
\mathbf{y}^T\boldsymbol{\Lambda}\boldsymbol{\mu} +
\frac{1}{2}\mathbf{y}^T\boldsymbol{\Lambda}\mathbf{y}\right)
```
The model is only specified up to a proportionality constant. The proportionality constant
(sometimes called the "partition function") is intractable for even moderately large ``n``:
evaluating it requires computing the right hand side of the above equation for ``2^n``
possible configurations of the dichotomous responses.
Inside the exponential above, there are three terms:
* The first term is the **unary** term, and ``\mathbf{\alpha}`` is called the
**unary parameter**. It summarizes each variable's endogenous tendency to take the "high"
state. Larger positive ``\alpha_i`` values make random variable ``Y_i`` more likely to take
the "high" value. Note that in practical models, ``\mathbf{\alpha}`` could be expressed
in terms of some other parameters.
* The second term is an optional **centering** term, and the value ``\mu_i`` is called the
centering adjustment for variable ``i``. The package includes different options
for centering, in the [`CenteringKinds`](@ref) enumeration. Setting centering to `none`
will set the centering adjustment to zero; setting centering to `expectation` will use the
centering adjustment of the "centered autologistic model" that has appeared in the
literature (e.g. [here](https://link.springer.com/article/10.1198/jabes.2009.07032) and
[here](https://doi.org/10.1002/env.1102)).
!!! note "Important Fact 2"
Just as with coding, changing an un-centered model to a centered one is not a minor
change. It produces a different probability model entirely. There is evidence
that [centering has drawbacks](https://doi.org/10.3389/fams.2017.00024), so the
uncentered model is used by default.
* The third term is the **pairwise** term, which handles the association between the
random variables. Parameter ``\boldsymbol{\Lambda}`` is a symmetric matrix. If it has
a nonzero entry at position ``(i,j)``, then variables ``i`` and ``j`` share an edge in the
graph associated with the model, and the value of the entry controls the strength of
association between those two variables. ``\boldsymbol{\Lambda}`` can be parametrized in
different ways. The simplest and most common option is to let
``\boldsymbol{\Lambda} = \lambda\mathbf{A}``, where ``\mathbf{A}`` is the adjacency
matrix of the graph. This "simple pairwise" option has only a single association
parameter, ``\lambda``.
The autologistic model is a
[probabilistic graphical model](https://en.wikipedia.org/wiki/Graphical_model), more
specifically a [Markov random field](https://en.wikipedia.org/wiki/Markov_random_field),
meaning it has an undirected graph that encodes conditional probability relationships among
the variables. `Autologistic.jl` uses `Graphs.jl` to represent the graph.
## The Autologistic Regression (ALR) Model
The AL model becomes an ALR model when the unary parameter is written as a linear
predictor:
```math
\Pr(\mathbf{Y}=\mathbf{y}) \propto \exp\left(\mathbf{y}^T\mathbf{X}\boldsymbol{\beta} -
\mathbf{y}^T\boldsymbol{\Lambda}\boldsymbol{\mu} +
\frac{1}{2}\mathbf{y}^T\boldsymbol{\Lambda}\mathbf{y}\right)
```
where ``\mathbf{X}`` is a matrix of predictors/covariates, and ``\boldsymbol{\beta}``
is a vector of regression coefficients.
Note that because the responses are correlated, we treat each vector ``\mathbf{y}`` as
a single observation, consisting of a set of "variables," "vertices," or "responses." If
the number of variables is large enough, the model can be fit with only one observation.
With more than one observation, we can write the data as
``(\mathbf{y}_1, \mathbf{X}_1), \ldots (\mathbf{y}_m, \mathbf{X}_m)``. Each observation is
a vector with its own matrix of predictor values.
## The Symmetric Model and Logistic Regression
Autologistic models can be expressed in a conditional log odds form. Let ``\pi_i`` be
the probability that variable ``i`` takes the high level, conditional on the values of all
of its neighbors. Then the AL model implies
```math
\text{logit}(\pi_i) = (h-\ell)(\alpha_i + \sum_{j\sim i}\lambda_{ij}(y_j - \mu_j)),
```
where ``(\ell, h)`` is the coding, ``\lambda_{ij}`` is the ``(i,j)``th element of
``\Lambda``, and ``j\sim i`` means "all variables that are neighbors of ``i``".
The conditional form illustrates the link between ALR models and logistic
regression. In the ALR model, ``\alpha_i = \mathbf{X}_i\boldsymbol{\beta}``. If all
``\lambda_{ij}=0`` and the coding is ``(0,1)``, the model becomes a logistic regression
model.
If we fit an ALR model to a data set, it is natural to wonder how the regression
coefficients compare to the logistic regression model, which assumes independence.
Unfortunately, the coefficients of the preferred "symmetric" ALR model are not immediately
comparable to logistic regression coefficients, because it uses ``(-1,1)``
coding. It is not hard to make the model comparable, however.
!!! note "The symmetric ALR model with (0,1) coding"
The symmetric ALR model with ``(-1, 1)`` coding is equivalent to a model with ``(0,1)``
coding and a constant centering adjustment of 0.5.
If the original symmetric model has coefficients ``(β, Λ)``, the transformed model with
``(0,1)`` coding has coefficients ``(2β, 4Λ)``. The transformed model's coefficients
can be directly compared to logistic regression effect sizes.
This means there are two ways to compare the symmetric ALR model to a logistic
regression model. Either
1. (recommended) Fit the ``(-1,1)`` `ALRsimple` model and transform the parameters, or
2. Fit an `ALRsimple` model with `coding=(0,1)` and `centering=onehalf`.
Both of the above options are illustrated in the [Comparison to logistic regression](@ref)
section of the [Examples](@ref) in this manual. | Autologistic | https://github.com/kramsretlow/Autologistic.jl.git |
|
[
"MIT"
] | 0.5.1 | 15b96cd9e90eaa8d256266aed59fd13fd1c8f181 | docs | 3796 | # Basic Usage
Typical usage of the package will involve the following three steps:
**1. Create a model object.**
All particular AL/ALR models are instances of subtypes of
[`AbstractAutologisticModel`](@ref). Each subtype is defined by a particular choice
for the parametrization of the unary and pairwise parts. At present the options
are:
* [`ALfull`](@ref): A model with type [`FullUnary`](@ref) as the unary part, and type
[`FullPairwise`](@ref) as the pairwise part (parameters ``α, Λ``).
* [`ALsimple`](@ref): A model with type [`FullUnary`](@ref) as the unary part, and type
[`SimplePairwise`](@ref) as the pairwise part (parameters ``α, λ``).
* [`ALRsimple`](@ref): A model with type [`LinPredUnary`](@ref) as the unary part, and type
[`SimplePairwise`](@ref) as the pairwise part (parameters ``β, λ``).
The first two types above are mostly for research or exploration purposes. Most users doing
data analysis will use the `ALRsimple` model.
Each of the above types have various constructors defined. For example, `ALRsimple(G, X)`
will create an `ALRsimple` model with graph `G` and predictor matrix `X`. Type, e.g.,
`?ALRsimple` at the REPL to see the constructors.
Any of the above model types can be used with any of the supported forms of centering, and
with any desired coding. Centering and coding can be set at the time of construction, or
the `centering` and `coding` fields of the type can be mutated to change the default
choices.
**2. Set parameters.**
Depending on the constructor used, the model just initialized will have either default
parameter values or user-specified parameter values. Usually
it will be desired to choose some appropriate values from data.
* [`fit_ml!`](@ref) uses maximum likelihood to estimate the parameters. It is only useful for
cases where the number of vertices in the graph is small.
* [`fit_pl!`](@ref) uses pseudolikelihood to estimate the parameters.
* [`setparameters!`](@ref), [`setunaryparameters!`](@ref), and
[`setpairwiseparameters!`](@ref) can be used to set the parameters of the model directly.
* [`getparameters`](@ref), [`getunaryparameters`](@ref), and
[`getpairwiseparameters`](@ref) can be used to retrieve the parameter values.
Changing the parameters directly, through the fields of the model object, is
discouraged. It is preferable for safety to use the above get and set functions.
**3. Inference and exploration.**
After parameter estimation, one typically wants to use the fitted model to answer
inference questions, make plots, and so on.
For small-graph cases:
* [`fit_ml!`](@ref) returns p-values and 95% confidence intervals that can be used directly.
* [`fullPMF`](@ref), [`conditionalprobabilities`](@ref), [`marginalprobabilities`](@ref) can
be used to get desired quantities from the fitted distribution.
* [`sample`](@ref) can be used to draw random samples from the fitted distribution.
For large-graph cases:
* If using [`fit_pl!`](@ref), argument `nboot` can be used to do inference by parametric
bootstrap at the time of fitting.
* After fitting, `oneboot` and `addboot!` can be used to create and add parametric bootstrap
replicates after the fact.
* Sampling can be used to estimate desired quantities like marginal probabilities. The
[`sample`](@ref) function implements Gibbs sampling as well as several perfect sampling
algorithms.
Estimation by `fit_ml!` or `fit_pl!` returns an object of type `ALfit`, which holds the
parameter estimates and other information.
Plotting can be done using standard Julia capabilities. The [Examples](@ref) section
shows how to make a few relevant plots.
The [Examples](@ref) section demonstrates the usage of all of the above capabilities. | Autologistic | https://github.com/kramsretlow/Autologistic.jl.git |
|
[
"MIT"
] | 0.5.1 | 15b96cd9e90eaa8d256266aed59fd13fd1c8f181 | docs | 4615 | # Design of the Package
The package was created to satisfy the following goals:
* To make it easy for researchers to compare the different variants of the AL/ALR models
(different combinations of coding and centering), to test the claim that the symmetric
model is superior to the other alternatives.
* To facilitate analysis of real-world data sets with correlated binary responses, hopefully
with good performance.
* To create a code base that is fairly easy to extend as new extensions on AL/ALR models
are developed.
These goals guided the design of the package, which is briefly described here.
## Type Hierarchy
Three abstract types are used to define a type hierarchy that will hopefully allow the
codebase to be easily extensible. The type `AbstractAutologisticModel` is the top-level
type for AL/ALR models. Most of the functions for computing with AL/ALR models are
defined to operate on this type, so that concrete subtypes should not have to re-implement
them.
The `AbstractAutologisticModel` interface requires subtypes to have a number of fields. Two
of them are `unary` and `pairwise`, which must inherit from `AbstractUnaryParameter` and
`AbstractPairwiseParameter`, respectively. These two abstract types define interfaces for
the unary and pairwise parts of the model. Concrete subtypes of these two types represent
different ways of parametrizing the unary and pairwise terms.
For example, the most useful ALR model implemented in the package is the model with a
linear predictor as the unary parameter
(``\boldsymbol{\alpha}=\mathbf{X}\boldsymbol{\beta}``), and the "simple pairwise" assumption
for the pairwise term (``\boldsymbol{\Lambda} = \lambda\mathbf{A}``). This model has type
`ALRsimple`, with unary type `LinPredUnary` and pairwise type `SimplePairwise`. A model
of this type can be instantiated with any desired coding, and different forms of centering.
With this design, adding a new type of AL/ALR model with a different parametrization
involves
* Creating `NewUnaryType <: AbstractUnaryParameter`
* Creating `NewPairwiseType <: AbstractPairwiseParameter`
* Creating `NewModelType <: AbstractAutologisticModel`, including instances of the two new
types as its `unary` and `pairwise` fields.
This process should not be too cumbersome, as the unary and pairwise interfaces mainly
require implementing indexing and show methods. Sampling, computation of probabilities,
handling of centering, etc., is handled by fallback methods in the abstract types.
## Important Notes
Here are a few points to be aware of in using the package. For this list, let `M` be a
an AL or ALR model type.
* Responses are stored in `M.responses` as arrays of type `Bool`.
* The coding is stored separately in `M.coding`. Not storing the responses as a numeric
type makes it easier to maintain consistency when working with models that might have
different codings.
* Use functions `makebool` and `makecoded` to get to and from coded/boolean forms of the
responses.
* Parameters are always represented as **vectors** of `Float64`. If we get all the model's
parameters, they are in a single vector with unary parameters first and pairwise parameters at the end.
* The above is true even when the parameter only has length 1, as with the `SimplePairwise`
type. So you need to use square brackets, as in `setparameters!(MyPairwise, [1.0])`, when
setting the parameters in that case.
* The package uses [Graphs.jl](https://github.com/JuliaGraphs/Graphs.jl) for
representing graphs, and [Optim.jl](https://github.com/JuliaNLSolvers/Optim.jl) for
optimization.
## Random Sampling
Random sampling is particularly important for AL/ALR models, because (except for very small
models), it isn't possible to evaluate the normalized PMF. Monte Carlo approaches to
estimation and inference are common with these models.
The `sample` function is provided for random sampling from an AL/ALR model. The function
takes a `method` argument, which specifies the sampling algorithm to use. Use
`?SamplingMethods` at the REPL to see the available options.
The default sampling method is Gibbs sampling, since that method will always work. But
there are several perfect (exact) sampling options provided in the package. Depending on
the model's parameters, perfect sampling will either work just fine, or be prohibitively
slow. It is recommended to use one of the perfect sampling methods if possible. The
different sampling algorithms can be compared for efficiency in particular cases. | Autologistic | https://github.com/kramsretlow/Autologistic.jl.git |
|
[
"MIT"
] | 0.5.1 | 15b96cd9e90eaa8d256266aed59fd13fd1c8f181 | docs | 24247 | # Examples
These examples demonstrate most of the functionality of the package, its typical usage, and
how to make some plots you might want to use.
The examples:
* [The Ising Model](@ref) shows how to use the package to explore the autologistic
probability distribution, without concern about covariates or parameter estimation.
* [Clustered Binary Data (Small ``n``)](@ref) shows how to use the package for regression
analysis when the graph is small enough to permit computation of the normalizing constant.
In this case standard maximum likelihood methods of inference can be used.
* [Spatial Binary Regression](@ref) shows how to use the package for autologistic regression
analysis for larger, spatially-referenced graphs. In this case pseudolikelihood is used
for estimation, and a (possibly parallelized) parametric bootstrap is used for inference.
## The Ising Model
The term "Ising model" is usually used to refer to a Markov random field of dichotomous
random variables on a regular lattice. The graph is such that each variable shares an
edge only with its nearest neighbors in each dimension. It's
a traditional model for magnetic spins, where the coding ``(-1,1)`` is usually used.
There's one parameter per vertex (a "local magnetic field") that increases or
decreases the chance of getting a ``+1`` state at that vertex; and there's a single pairwise
parameter that controls the strength of interaction between neighbor states.
In our terminology it's just an autologistic model with the appropriate graph.
Specifically, it's an `ALsimple` model: one with `FullUnary` type unary parameter, and
`SimplePairwise` type pairwise parameter.
We can create such a model once we have the graph. For example, let's create the model on a
30-by-30 lattice:
```@example Ising
using Autologistic, Random
Random.seed!(8888)
n = 30
G = makegrid4(n, n, (-1,1), (-1,1))
α = randn(n^2)
M1 = ALsimple(G.G, α)
nothing # hide
```
Above, the line `G = makegrid4(n, n, (-1,1), (-1,1))` produces an n-by-n graph with vertices
positioned over the square extending from ``-1`` to ``1`` in both directions. It returns a
tuple; `G.G` is the graph, and `G.locs` is
an array of tuples giving the spatial coordinates of each vertex.
`M1 = ALsimple(G.G, α)` creates the model. The unary parameters `α` were initialized to
Gaussian white noise. By default the pairwise parameter is set to zero, which implies
independence of the variables.
Typing `M1` at the REPL shows information about the model. It's an `ALsimple` type with one
observation of length 900.
```@repl Ising
M1
```
The `conditionalprobabilities` function returns the probablity of observing a ``+1`` state
at each vertex, conditional on the vertex's neighbor values. These can be visualized
as an image, using a `heatmap` (from [Plots.jl](https://github.com/JuliaPlots/Plots.jl)):
```@example Ising
using Plots
condprobs = conditionalprobabilities(M1)
hm = heatmap(reshape(condprobs, n, n), c=:grays, aspect_ratio=1,
title="probability of +1 under independence")
plot(hm)
```
Since the association parameter is zero, there are no neighborhood effects. The above
conditional probabilities are equal to the marginal probabilities.
Next, set the association parameters to 0.75, a fairly strong association level, to
introduce a neighbor effect.
```@example Ising
setpairwiseparameters!(M1, [0.75])
nothing # hide
```
We can also generalize the Ising model by allowing the pairwise parameters to be different
for each edge of the graph. The `ALfull` type represents such a model, which has a
`FullUnary` type unary parameter, and a `FullPairwise` type pairwise parameter. For this
example, let each edge's pairwise parameter be equal to the average distance of its two
vertices from the origin.
```@example Ising
using LinearAlgebra, Graphs
λ = [norm((G.locs[e.src] .+ G.locs[e.dst])./2) for e in edges(G.G)]
M2 = ALfull(G.G, α, λ)
```
A quick way to compare models with nonzero association is to observe random samples from the
models. The `sample` function can be used to do this. For this example, use perfect
sampling using a bounding chain algorithm.
```@example Ising
s1 = sample(M1, method=perfect_bounding_chain)
s2 = sample(M2, method=perfect_bounding_chain)
nothing #hide
```
Other options are available for sampling. The enumeration [`SamplingMethods`](@ref) lists
them. The samples we have just drawn can also be visualized using `heatmap`:
```@example Ising
pl1 = heatmap(reshape(s1, n, n), c=:grays, colorbar=false, title="ALsimple model");
pl2 = heatmap(reshape(s2, n, n), c=:grays, colorbar=false, title="ALfull model");
plot(pl1, pl2, size=(800,400), aspect_ratio=1)
```
In these plots, black indicates the low state, and white the high state. A lot of local
clustering is occurring in the samples due to the neighbor effects. For the `ALfull` model,
clustering is greater farther from the center of the square.
To see the long-run differences between the two models, we can look at the marginal
probabilities. They can be estimated by drawing many samples and averaging them
(note that running this code chunk can take a few minutes):
```julia
marg1 = sample(M1, 500, method=perfect_bounding_chain, verbose=true, average=true)
marg2 = sample(M2, 500, method=perfect_bounding_chain, verbose=true, average=true)
pl3 = heatmap(reshape(marg1, n, n), c=:grays,
colorbar=false, title="ALsimple model");
pl4 = heatmap(reshape(marg2, n, n), c=:grays,
colorbar=false, title="ALfull model");
plot(pl3, pl4, size=(800,400), aspect_ratio=1)
savefig("marginal-probs.png")
```
The figure `marginal-probs.png` looks like this:

The differences between the two marginal distributions are due to the different association
structures, because the unary parts of the two models are the same. The `ALfull` model has
stronger association near the edges of the square, and weaker association near the center.
The `ALsimple` model has a moderate association level throughout.
As a final demonstration, perform Gibbs sampling for model `M2`, starting from
a random state. Display a gif animation of the progress.
```julia
nframes = 150
gibbs_steps = sample(M2, nframes, method=Gibbs)
anim = @animate for i = 1:nframes
heatmap(reshape(gibbs_steps[:,i], n, n), c=:grays, colorbar=false,
aspect_ratio=1, title="Gibbs sampling: step $(i)")
end
gif(anim, "ising_gif.gif", fps=10)
```

## Clustered Binary Data (Small ``n``)
The *retinitis pigmentosa* data set (obtained from
[this source](https://sites.google.com/a/channing.harvard.edu/bernardrosner/channing/regression-method-when-the-eye-is-the-unit-of-analysis))
is an opthalmology data set. The data comes from 444 patients that had both eyes
examined. The data can be loaded with `Autologistic.datasets`:
```@repl pigmentosa
using Autologistic, DataFrames, Graphs
df = Autologistic.datasets("pigmentosa");
first(df, 6)
describe(df)
```
The response for each eye is **va**, an indicator of poor visual acuity (coded 0 = no,
1 = yes in the data set). Seven covariates were also recorded for each eye:
* **aut_dom**: autosomal dominant (0=no, 1=yes)
* **aut_rec**: autosomal recessive (0=no, 1=yes)
* **sex_link**: sex-linked (0=no, 1=yes)
* **age**: age (years, range 6-80)
* **sex**: gender (0=female, 1=male)
* **psc**: posterior subcapsular cataract (0=no, 1=yes)
* **eye**: which eye is it? (0=left, 1=right)
The last four factors are relevant clinical observations, and the first three are genetic
factors. The data set also includes an **ID** column with an ID number specific to each
patient. Eyes with the same ID come from the same person.
The natural unit of analysis is the eye, but pairs of observations from the same
patient are "clustered" because the occurrence of acuity loss in the left and right eye
is probably correlated. We can model each person's two **va** outcomes as two
dichotomous random variables with a 2-vertex, 1-edge graph.
```@repl pigmentosa
G = Graph(2,1)
```
Each of the 444 bivariate observations has this graph, and each has its own set of
covariates.
If we include all seven predictors, plus intercept, in our model, we have 2 variables per
observation, 8 predictors, and 444 observations.
Before creating the model we need to re-structure the covariates. The data in `df` has one
row per eye, with the variable `ID` indicating which eyes belong to the same patient. We
need to rearrange the responses (`Y`) and the predictors (`X`) into arrays suitable for our
autologistic models, namely:
* `Y` is ``2 \times 444`` with one observation per column.
* `X` is ``2 \times 8 \times 444`` with one ``2 \times 8`` matrix of predictors for each
observation. The first column of each predictor matrix is an intercept column, and
columns 2 through 8 are for `aut_dom`, `aut_rec`, `sex_link`, `age`, `sex`, `psc`, and
`eye`, respectively.
```@example pigmentosa
X = Array{Float64,3}(undef, 2, 8, 444);
Y = Array{Float64,2}(undef, 2, 444);
for i in 1:2:888
patient = Int((i+1)/2)
X[1,:,patient] = [1 permutedims(Vector(df[i,2:8]))]
X[2,:,patient] = [1 permutedims(Vector(df[i+1,2:8]))]
Y[:,patient] = convert(Array, df[i:i+1, 9])
end
```
For example, patient 100 had responses
```@repl pigmentosa
Y[:,100]
```
Indicating visual acuity loss in the left eye, but not in the right. The predictors for
this individual are
```@repl pigmentosa
X[:,:,100]
```
Now we can create our autologistic regression model.
```@example pigmentosa
model = ALRsimple(G, X, Y=Y)
```
This creates a model with the "simple pairwise" structure, using a single association
parameter. The default is to use no centering adjustment, and to use coding ``(-1,1)`` for
the responses. This "symmetric" version of the model is recommended for
[a variety of reasons](https://doi.org/10.3389/fams.2017.00024). Using different coding
or centering choices is only recommended if you have a thorough understanding of what
you are doing; but if you wish to use different choices, this can easily be done using
keyword arguments. For example, `ALRsimple(G, X, Y=Y, coding=(0,1), centering=expectation)`
creates the "centered autologistic model" that has appeared in the literature (e.g.,
[here](https://link.springer.com/article/10.1198/jabes.2009.07032) and
[here](https://doi.org/10.1002/env.1102)).
The model has nine parameters (eight regression coefficients plus the association
parameter). All parameters are initialized to zero:
```@repl pigmentosa
getparameters(model)
```
When we call `getparameters`, the vector returned always has the unary parameters first,
with the pairwise parameter(s) appended at the end.
Because there are only two vertices in the graph, we can use the full likelihood
(`fit_ml!` function) to do parameter estimation. This function returns a structure with
the estimates as well as standard errors, p-values, and 95% confidence intervals for the
parameter estimates.
```@example pigmentosa
out = fit_ml!(model)
```
To view the estimation results, use `summary`:
```@example pigmentosa
summary(out, parnames = ["icept", "aut_dom", "aut_rec", "sex_link", "age", "sex",
"psc", "eye", "λ"])
```
From this we see that the association parameter is fairly large (0.818), supporting the
idea that the left and right eyes are associated. It is also highly statistically
significant. Among the covariates, `sex_link`, `age`, and `psc` are all statistically
significant.
## Spatial Binary Regression
ALR models are natural candidates for analysis of spatial binary data, where
locations in the same neighborhood are more likely to have the same outcome than sites that
are far apart.
The [hydrocotyle data](https://doi.org/10.1016/j.ecolmodel.2007.04.024) provide a typical
example. The response in this data set is the presence/absence of a certain plant species
in a grid of 2995 regions covering Germany. The data set is included in Autologistic.jl:
```@repl hydro
using Autologistic, DataFrames, Graphs
df = Autologistic.datasets("hydrocotyle")
```
In the data frame, the variables `X` and `Y` give the spatial coordinates of each region (in
dimensionless integer units), `obs` gives the presence/absence data (1 = presence), and
`altitude` and `temperature` are covariates.
We will use an `ALRsimple` model for these data. The graph can be formed using
[`makespatialgraph`](@ref):
```@example hydro
locations = [(df.X[i], df.Y[i]) for i in 1:size(df,1)]
g = makespatialgraph(locations, 1.0)
nothing # hide
```
`makespatialgraph` creates the graph by adding edges between any vertices with Euclidean
distance smaller than a cutoff distance (Graphs.jl has a `euclidean_graph` function
that does the same thing). For these data arranged on a grid, a threshold
of 1.0 will make a 4-nearest-neighbors lattice. Letting the threshold be `sqrt(2)` would
make an 8-nearest-neighbors lattice.
We can visualize the graph, the responses, and the predictors using
[GraphRecipes.jl](https://github.com/JuliaPlots/GraphRecipes.jl) (there are
[several other](https://juliagraphs.org/Graphs.jl/stable/plotting/)
options for plotting graphs as well).
```@example hydro
using GraphRecipes, Plots
# Function to convert a value to a gray shade
makegray(x, lo, hi) = RGB([(x-lo)/(hi-lo) for i=1:3]...)
# Function to plot the graph with node shading determined by v.
# Plot each node as a square and don't show the edges.
function myplot(v, lohi=nothing)
colors = isnothing(lohi) ? makegray.(v, minimum(v), maximum(v)) : makegray.(v, lohi[1], lohi[2])
return graphplot(g.G, x=df.X, y=df.Y, background_color = :lightblue,
marker = :square, markersize=2, markerstrokewidth=0,
markercolor = colors, yflip = true, linecolor=nothing)
end
# Make the plot
plot(myplot(df.obs), myplot(df.altitude), myplot(df.temperature),
layout=(1,3), size=(800,300), titlefontsize=8,
title=hcat("Species Presence (white = yes)", "Altitude (lighter = higher)",
"Temperature (lighter = higher)"))
```
### Constructing the model
We can see that the species primarily is found at low-altitude locations. To model the
effect of altitude and temperature on species presence, construct an `ALRsimple` model.
```@example hydro
# Autologistic.jl requires predictors to be a matrix of Float64
Xmatrix = Array{Float64}([ones(2995) df.altitude df.temperature])
# Create the model
hydro = ALRsimple(g.G, Xmatrix, Y=df.obs)
```
The model `hydro` has four parameters: three regression coefficients (intercept, altitude,
and temperature) plus an association parameter. It is a "symmetric" autologistic model,
because it has a coding symmetric around zero and no centering term.
### Fitting the model by pseudolikelihood
With 2995 nodes in the graph, the likelihood is intractable for this case. Use `fit_pl!` to
do parameter estimation by pseudolikelihood instead. The fitting function uses the BFGS
algorithm via [`Optim.jl`](http://julianlsolvers.github.io/Optim.jl/stable/). Any of
Optim's [general options](http://julianlsolvers.github.io/Optim.jl/stable/#user/config/)
can be passed to `fit_pl!` to control the optimization. We have found that
`allow_f_increases` often aids convergence. It is used here:
```@repl hydro
fit1 = fit_pl!(hydro, allow_f_increases=true)
parnames = ["intercept", "altitude", "temperature", "association"];
summary(fit1, parnames=parnames)
```
`fit_pl!` mutates the model object by setting its parameters to the optimal values. It also
returns an object, of type `ALfit`, which holds information about the result. Calling
`summary(fit1)` produces a summary table of the estimates. For now there are no standard
errors. This will be addressed below.
To quickly visualize the quality of the fitted model, we can use sampling to get the
marginal probabilities, and to observe specific samples.
```@example hydro
# Average 500 samples to estimate marginal probability of species presence
marginal1 = sample(hydro, 500, method=perfect_bounding_chain, average=true)
# Draw 2 random samples for visualizing generated data.
draws = sample(hydro, 2, method=perfect_bounding_chain)
# Plot them
plot(myplot(marginal1, (0,1)), myplot(draws[:,1]), myplot(draws[:,2]),
layout=(1,3), size=(800,300), titlefontsize=8,
title=["Marginal Probability" "Random sample 1" "Random Sample 2"])
```
In the above code, perfect sampling was used to draw samples from the fitted distribution.
The marginal plot shows consistency with the observed data, and the two generated data
sets show a level of spatial clustering similar to the observed data.
### Error estimation 1: bootstrap after the fact
A parametric bootstrap can be used to get an estimate of the precision of the estimates
returned by `fit_pl!`. The function [`oneboot`](@ref) has been included in the package to
facilitate this. Each call of `oneboot` draws a random sample from the fitted distribution,
then re-fits the model using this sample as the responses. It returns a named tuple giving
the sample, the parameter estimates, and a convergence flag. Any extra keyword arguments
are passed on to `sample` or `optimize` as appropriate to control the process.
```@repl hydro
# Do one bootstrap replication for demonstration purposes.
oneboot(hydro, allow_f_increases=true, method=perfect_bounding_chain)
```
An array of the tuples produced by `oneboot` can be fed to [`addboot!`](@ref) to update
the fitting summary with precision estimates:
```julia
nboot = 2000
boots = [oneboot(hydro, allow_f_increases=true, method=perfect_bounding_chain) for i=1:nboot]
addboot!(fit1, boots)
```
At the time of writing, this took about 5.7 minutes on the author's workstation.
After adding the bootstrap information, the fitting results look like this:
```
julia> summary(fit1,parnames=parnames)
name est se 95% CI
intercept -0.192 0.319 (-0.858, 0.4)
altitude -0.0573 0.015 (-0.0887, -0.0296)
temperature 0.0498 0.0361 (-0.0163, 0.126)
association 0.361 0.018 (0.326, 0.397)
```
Confidence intervals for altitude and the association parameter both exclude zero, so we
conclude that they are statistically significant.
### Error estimation 2: (parallel) bootstrap when fitting
Alternatively, the bootstrap inference procedure can be done at the same time as fitting by
providing the keyword argument `nboot` (which specifies the number of bootstrap samples to
generate) when calling `fit_pl!`. If you do this, **and** you have more than one worker
process available, then the bootstrap will be done in parallel across the workers (using an
`@distributed for` loop). This makes it easy to achieve speed gains from parallelism on
multicore workstations.
```julia
using Distributed # needed for parallel computing
addprocs(6) # create 6 worker processes
@everywhere using Autologistic # workers need the package loaded
fit2 = fit_pl!(hydro, nboot=2000,
allow_f_increases=true, method=perfect_bounding_chain)
```
In this case the 2000 bootstrap replications took about 1.1 minutes on the same 6-core
workstation. The output object `fit2` already includes the confidence intervals:
```
julia> summary(fit2, parnames=parnames)
name est se 95% CI
intercept -0.192 0.33 (-0.9, 0.407)
altitude -0.0573 0.0157 (-0.0897, -0.0297)
temperature 0.0498 0.0372 (-0.0169, 0.13)
association 0.361 0.0179 (0.327, 0.396)
```
For parallel computing of the bootstrap in other settings (e.g., on a cluster), it should be
fairly simple to implement in a script, using the `oneboot`/`addboot!` approach of the previous
section.
### Comparison to logistic regression
If we ignore spatial association, and just fit the model with ordinary logistic regression,
we get the following result:
```@example hydro
using GLM
LR = glm(@formula(obs ~ altitude + temperature), df, Bernoulli(), LogitLink());
coef(LR)
```
As mentioned in [The Symmetric Model and Logistic Regression](@ref), the logistic regression
coefficients are not directly comparable to the ALR coefficients,
because the ALR model uses ``(-1, 1)`` coding. If we want to make the parameters
comparable, we can either transform the symmetric model's parameters, or fit the transformed
symmetric model (a model with ``(0,1)`` coding and `centering=onehalf`).
The parameter transformation is done as follows:
```@example hydro
transformed_pars = [2*getunaryparameters(hydro); 4*getpairwiseparameters(hydro)]
```
We see that the association parameter is large (1.45), but the regression parameters are
small compared to the logistic regression model. This is typical: ignoring spatial
association tends to result in overestimation of the regression effects.
We can fit the transformed model directly, to illustrate that the result is the same:
```@example hydro
same_as_hydro = ALRsimple(g.G, Xmatrix, Y=df.obs, coding=(0,1), centering=onehalf)
fit3 = fit_pl!(same_as_hydro, allow_f_increases=true)
fit3.estimate
```
We see that the parameter estimates from `same_as_hydro` are equal to the `hydro` estimates
after transformation.
### Comparison to the centered model
The centered autologistic model can be easily constructed for comparison with the
symmetric one. We can start with a copy of the symmetric model we have already created.
The pseudolikelihood function for the centered model is not convex. Three different local
optima were found. For this demonstration we are using the `start` argument to let
optimization start from a point close to the best minimum found.
```julia
centered_hydro = deepcopy(hydro)
centered_hydro.coding = (0,1)
centered_hydro.centering = expectation
fit4 = fit_pl!(centered_hydro, nboot=2000, start=[-1.7, -0.17, 0.0, 1.5],
allow_f_increases=true, method=perfect_bounding_chain)
```
```
julia> summary(fit4, parnames=parnames)
name est se 95% CI
intercept -2.29 1.07 (-4.6, -0.345)
altitude -0.16 0.0429 (-0.258, -0.088)
temperature 0.0634 0.115 (-0.138, 0.32)
association 1.51 0.0505 (1.42, 1.61)
julia> round.([fit3.estimate fit4.estimate], digits=3)
4×2 Array{Float64,2}:
-0.383 -2.29
-0.115 -0.16
0.1 0.063
1.446 1.506
```
The main difference between the symmetric ALR model and the centered one is the intercept,
which changes from -0.383 to -2.29 when changing to the centered model. This is not a small
difference. To see this, compare what the two models predict in the absence of spatial
association.
```julia
# Change models to have association parameters equal to zero
# Remember parameters are always Array{Float64,1}.
setpairwiseparameters!(centered_hydro, [0.0])
setpairwiseparameters!(hydro, [0.0])
# Sample to estimate marginal probabilities
centered_marg = sample(centered_hydro, 500, method=perfect_bounding_chain, average=true)
symmetric_marg = sample(hydro, 500, method=perfect_bounding_chain, average=true)
# Plot to compare
plot(myplot(centered_marg, (0,1)), myplot(symmetric_marg, (0,1)),
layout=(1,2), size=(500,300), titlefontsize=8,
title=["Centered Model" "Symmetric Model"])
```

If we remove the spatial association term, the centered model predicts a very low
probability of seeing the plant anywhere--including in locations with low elevation, where
the plant is plentiful in reality. This is a manifestation of a problem with the centered
model, where parameter interpretability is lost when association becomes strong. | Autologistic | https://github.com/kramsretlow/Autologistic.jl.git |
|
[
"MIT"
] | 0.5.1 | 15b96cd9e90eaa8d256266aed59fd13fd1c8f181 | docs | 324 | # Reference
```@meta
CurrentModule = Autologistic
DocTestSetup = :(using Autologistic, Graphs)
```
## Index
```@index
```
## Types and Constructors
```@autodocs
Modules = [Autologistic]
Order = [:type, :constant]
```
## Methods
```@autodocs
Modules = [Autologistic]
Order = [:function]
``` | Autologistic | https://github.com/kramsretlow/Autologistic.jl.git |
|
[
"MIT"
] | 0.5.1 | 15b96cd9e90eaa8d256266aed59fd13fd1c8f181 | docs | 1523 | # Introduction
The `Autologistic.jl` package provides tools for analyzing correlated binary data using
autologistic (AL) or autologistic regression (ALR) models. The AL model is a multivariate
probability distribution for dichotomous (two-valued) categorical responses. The ALR models
incorporate covariate effects into this distribution and are therefore more useful for data
analysis.
The ALR model is potentially useful for any situation involving correlated binary responses.
It can be described in a few ways. It is:
* An extension of logistic regression to handle non-independent responses.
* A Markov random field model for dichotomous random variables, with covariate effects.
* An extension of the Ising model to handle different graph structures and
to include covariate effects.
* The quadratic exponential binary (QEB) distribution, incorporating
covariate effects.
This package follows the treatment of this model given in the paper
[Better Autologistic Regression](https://doi.org/10.3389/fams.2017.00024). Please refer
to that article for in-depth discussion of the model, and please cite it if you use this
package in your research. The [Background](@ref) section in this manual also provides an
overview of the model.
## Contents
```@contents
Pages = ["index.md", "Background.md", "Design.md", "BasicUsage.md", "Examples.md", "api.md"]
Depth = 2
```
## Reference Index
The following topics are documented in the [Reference](@ref) section:
```@index
``` | Autologistic | https://github.com/kramsretlow/Autologistic.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 0736fd885e445c13b7b58e31d1ccc37a4950f7df | code | 22182 | ### A Pluto.jl notebook ###
# v0.19.42
using Markdown
using InteractiveUtils
# ╔═╡ 2f721887-6dee-4b53-ae33-2c0a4b79ff37
# ╠═╡ show_logs = false
begin
using Pkg; Pkg.activate(".")
using Revise
using PlutoUI
# left right layout
# Display `a` and `b` side by side in a borderless two-column HTML table of the
# given pixel `width`. The injected <style> rule disables the row-hover highlight
# for tables with class "nohover", so the layout reads as static content rather
# than an interactive table.
function leftright(a, b; width=600)
HTML("""
<style>
table.nohover tr:hover td {
background-color: white !important;
}</style>
<table width=$(width)px class="nohover" style="border:none">
<tr>
<td>$(html(a))</td>
<td>$(html(b))</td>
</tr></table>
""")
end
# up down layout
# Display `a` above `b` in a borderless single-column HTML table. With
# `width=nothing` the table sizes itself; otherwise a fixed pixel width is set.
function updown(a, b; width=nothing)
HTML("""<table class="nohover" style="border:none" $(width === nothing ? "" : "width=$(width)px")>
<tr>
<td>$(html(a))</td>
</tr>
<tr>
<td>$(html(b))</td>
</tr></table>
""")
end
# Render PlutoUI's floating table of contents as this cell's output.
PlutoUI.TableOfContents()
end
# ╔═╡ 39bcea18-00b6-42ca-a1f2-53655f31fea7
using UnitDiskMapping, Graphs, GenericTensorNetworks, LinearAlgebra
# ╔═╡ 98459516-4833-4e4a-916f-d5ea3e657ceb
# Visualization setup.
# To make the plots dark-mode friendly, we use white-background color.
using UnitDiskMapping.LuxorGraphPlot.Luxor, LuxorGraphPlot
# ╔═╡ eac6ceda-f5d4-11ec-23db-b7b4d00eaddf
md"# Unit Disk Mapping"
# ╔═╡ bbe26162-1ab7-4224-8870-9504b7c3aecf
md"## Generic Unweighted Mapping
The generic unweighted mapping aims to reduce a generic unweighted Maximum Independent Set (MIS) problem to one on a defected King's graph.
Check [our paper (link to be added)]() for the mapping scheme.
"
# ╔═╡ b23f0215-8751-4105-aa7e-2c26e629e909
md"Let the source graph be the Petersen graph."
# ╔═╡ 7518d763-17a4-4c6e-bff0-941852ec1ccf
graph = smallgraph(:petersen)
# ╔═╡ 0302be92-076a-4ebe-8d6d-4b352a77cfce
LuxorGraphPlot.show_graph(graph)
# ╔═╡ 417b18f6-6a8f-45fb-b979-6ec9d12c6246
md"We can use the `map_graph` function to map the unweighted MIS problem on the Petersen graph to one on a defected King's graph."
# ╔═╡ c7315578-8bb0-40a0-a2a3-685a80674c9c
unweighted_res = map_graph(graph; vertex_order=MinhThiTrick());
# ╔═╡ 3f605eac-f587-40b2-8fac-8223777d3fad
md"Here, the keyword argument `vertex_order` can be a vector of vertices in a specified order, or the method to compute the path decomposition that generates an order. The `MinhThiTrick()` method is an exact path decomposition solver, which is suited for small graphs (where number of vertices <= 50). The `Greedy()` method finds the vertex order much faster and works in all cases, but may not be optimal.
A good vertex order can reduce the depth of the mapped graph."
# ╔═╡ e5382b61-6387-49b5-bae8-0389fbc92153
md"The return value contains the following fields:"
# ╔═╡ ae5c8359-6bdb-4a2a-8b54-cd2c7d2af4bd
fieldnames(unweighted_res |> typeof)
# ╔═╡ 56bdcaa6-c8b9-47de-95d4-6e95204af0f2
md"The field `grid_graph` is the mapped grid graph."
# ╔═╡ 520fbc23-927c-4328-8dc6-5b98853fb90d
LuxorGraphPlot.show_graph(unweighted_res.grid_graph)
# ╔═╡ af162d39-2da9-4a06-9cde-8306e811ba7a
unweighted_res.grid_graph.size
# ╔═╡ 96ca41c0-ac77-404c-ada3-0cdc4a426e44
md"The field `lines` is a vector of copy gadgets arranged in a `⊢` shape. These copy gadgets form a *crossing lattice*, in which two copy lines cross each other whenever their corresponding vertices in the source graph are connected by an edge.
```
vslot
↓
| ← vstart
|
|------- ← hslot
| ↑ ← vstop
hstop
```
"
# ╔═╡ 5dfa8a74-26a5-45c4-a80c-47ba4a6a4ae9
unweighted_res.lines
# ╔═╡ a64c2094-9a51-4c45-b9d1-41693c89a212
md"The field `mapping_history` contains the rewrite rules applied to the crossing lattice. They contain important information for mapping a solution back."
# ╔═╡ 52b904ad-6fb5-4a7e-a3db-ae7aff32be51
unweighted_res.mapping_history
# ╔═╡ ef828107-08ce-4d91-ba56-2b2c7862aa50
md"The field `mis_overhead` is the difference between ``\alpha(G_M) - \alpha(G_S)``, where ``G_M`` and ``G_S`` are the mapped and source graph."
# ╔═╡ acd7107c-c739-4ee7-b0e8-6383c54f714f
unweighted_res.mis_overhead
# ╔═╡ 94feaf1f-77ea-4d6f-ba2f-2f9543e8c1bd
md"We can solve the mapped graph with [`GenericTensorNetworks`](https://queracomputing.github.io/GenericTensorNetworks.jl/dev/)."
# ╔═╡ f084b98b-097d-4b33-a0d3-0d0a981f735e
res = solve(GenericTensorNetwork(IndependentSet(SimpleGraph(unweighted_res.grid_graph))), SingleConfigMax())[]
# ╔═╡ 86457b4e-b83e-4bf5-9d82-b5e14c055b4b
md"You might want to read [the documentation page of `GenericTensorNetworks`](https://queracomputing.github.io/GenericTensorNetworks.jl/dev/) for a detailed explanation on this function. Here, we just visually check the solution configuration."
# ╔═╡ 4abb86dd-67e2-46f4-ae6c-e97952b23fdc
show_config(unweighted_res.grid_graph, res.c.data)
# ╔═╡ 5ec5e23a-6904-41cc-b2dc-659da9556d20
md"By mapping the result back, we get a solution for the original Petersen graph. Its maximum independent set size is 4."
# ╔═╡ 773ce349-ba72-426c-849d-cfb511773756
# The solution obtained by solving the mapped graph
original_configs = map_config_back(unweighted_res, res.c.data)
# ╔═╡ 7d921205-5133-40c0-bfa6-f76713dd4972
# Confirm that the solution from the mapped graph gives us
# a maximum independent set for the original graph
UnitDiskMapping.is_independent_set(graph, original_configs)
# ╔═╡ 3273f936-a182-4ed0-9662-26aab489776b
md"## Generic Weighted Mapping"
# ╔═╡ 5e4500f5-beb6-4ef9-bd42-41dc13b60bce
md"A Maximum Weight Independent Set (MWIS) problem on a general graph can be mapped to one on the defected King's graph. The first step is to do the same mapping as above but adding a new positional argument `Weighted()` as the first argument of `map_graph`. Let us still use the Petersen graph as an example."
# ╔═╡ 2fa704ee-d5c1-4205-9a6a-34ba0195fecf
weighted_res = map_graph(Weighted(), graph; vertex_order=MinhThiTrick());
# ╔═╡ 27acc8be-2db8-4322-85b4-230fdddac043
md"The return value is similar to that for the unweighted mapping generated above, except each node in the mapped graph can have a weight 1, 2 or 3. Note here, we haven't added the weights in the original graph."
# ╔═╡ b8879b2c-c6c2-47e2-a989-63a00c645676
show_grayscale(weighted_res.grid_graph)
# ╔═╡ 1262569f-d091-40dc-a431-cbbe77b912ab
md"""
The "pins" of the mapped graph have a one-to-one correspondence to the vertices in the source graph.
"""
# ╔═╡ d5a64013-b7cc-412b-825d-b9d8f0737248
show_pins(weighted_res)
# ╔═╡ 3c46e050-0f93-42af-a6ff-1a83e7d0f6da
md"The weights in the original graph can be added to the pins of this grid graph using the `map_weights` function. The added weights must be smaller than 1."
# ╔═╡ 39cbb6fc-1c55-42dd-bbf6-54e06f5c7048
source_weights = rand(10)
# ╔═╡ 41840a24-596e-4d93-9468-35329d57b0ce
mapped_weights = map_weights(weighted_res, source_weights)
# ╔═╡ f77293c4-e5c3-4f14-95a2-ac9688fa3ba1
md"Now that we have both the graph and the weights, let us solve the mapped problem!"
# ╔═╡ cf910d3e-3e3c-42ef-acf3-d0990d6227ac
# Solve the weighted MIS problem on the mapped grid graph and keep one optimal
# configuration as a 0/1 integer vector.
wmap_config = let
	g, _ = graph_and_weights(weighted_res.grid_graph)
	# The grid graph's own weights are discarded; `mapped_weights` already
	# carries the source-graph weights folded onto the pins.
	best = solve(GenericTensorNetwork(IndependentSet(g, mapped_weights)), SingleConfigMax())[]
	collect(Int, best.c.data)
end
# ╔═╡ d0648123-65fc-4dd7-8c0b-149b67920d8b
show_config(weighted_res.grid_graph, wmap_config)
# ╔═╡ fdc0fd6f-369e-4f1b-b105-672ae4229f02
md"By reading the configurations of the pins, we obtain a solution for the source graph."
# ╔═╡ 317839b5-3c30-401f-970c-231c204331b5
# The solution obtained by solving the mapped graph
map_config_back(weighted_res, wmap_config)
# ╔═╡ beb7c0e5-6221-4f20-9166-2bd56902be1b
# Directly solving the source graph
collect(Int,
solve(GenericTensorNetwork(IndependentSet(graph, source_weights)), SingleConfigMax())[].c.data
)
# ╔═╡ cf7e88cb-432e-4e3a-ae8b-8fa12689e485
md"## QUBO problem"
# ╔═╡ d16a6f2e-1ae2-47f1-8496-db6963800fd2
md"### Generic QUBO mapping"
# ╔═╡ b5d95984-cf8d-4bce-a73a-8eb2a7c6b830
md"""
A QUBO problem can be specified as the following energy model:
```math
E(z) = -\sum_{i<j} J_{ij} z_i z_j + \sum_i h_i z_i
```
"""
# ╔═╡ 2d1eb5cb-183d-4c4e-9a14-53fa08cbb156
n = 6
# ╔═╡ 5ce3e8c9-e78e-4444-b502-e91b4bda5678
J = triu(randn(n, n) * 0.001, 1); J += J'
# ╔═╡ 828cf2a9-9178-41ae-86d3-e14d8c909c39
h = randn(n) * 0.001
# ╔═╡ 09db490e-961a-4c64-bcc5-5c111bfd3b7a
md"Now, let us do the mapping on an ``n \times n`` crossing lattice."
# ╔═╡ 081d1eee-96b1-4e76-8b8c-c0d4e5bdbaed
qubo = UnitDiskMapping.map_qubo(J, h);
# ╔═╡ 7974df7d-c390-4706-b7ba-6bde4409510d
md"The mapping result contains two fields, the `grid_graph` and the `pins`. After finding the ground state of the mapped independent set problem, the configuration of the spin glass can be read directly from the pins. The following graph plots the pins in red color."
# ╔═╡ e6aeeeb4-704c-4ba4-abc2-29c4029e276d
qubo_graph, qubo_weights = UnitDiskMapping.graph_and_weights(qubo.grid_graph)
# ╔═╡ 8467e950-7302-4930-8698-8e7b523556a6
show_pins(qubo)
# ╔═╡ 6976c82f-90f0-4091-b13d-af463fe75c8b
md"One can also check the weights using the gray-scale plot."
# ╔═╡ 95539e68-c1ea-4a6c-9406-2696d62b8461
show_grayscale(qubo.grid_graph)
# ╔═╡ 5282ca54-aa98-4d51-aaf9-af20eae5cc81
md"By solving this maximum independent set problem, we will get the following configuration."
# ╔═╡ ef149d9a-6aa9-4f34-b936-201b9d77543c
qubo_mapped_solution = collect(Int, solve(GenericTensorNetwork(IndependentSet(qubo_graph, qubo_weights)), SingleConfigMax())[].c.data)
# ╔═╡ 4ea4f26e-746d-488e-9968-9fc584c04bcf
show_config(qubo.grid_graph, qubo_mapped_solution)
# ╔═╡ b64500b6-99b6-497b-9096-4bab4ddbec8d
md"This solution can be mapped to a solution for the source graph by reading the configurations on the pins."
# ╔═╡ cca6e2f8-69c5-4a3a-9f97-699b4868c4b9
# The solution obtained by solving the mapped graph
map_config_back(qubo, collect(Int, qubo_mapped_solution))
# ╔═╡ 80757735-8e73-4cae-88d0-9fe3d3e539c0
md"This solution is consistent with the exact solution:"
# ╔═╡ 7dd900fc-9531-4bd6-8b6d-3aac3d5a2386
# Directly solving the source graph, due to the convention issue, we flip the signs of `J` and `h`
collect(Int, solve(GenericTensorNetwork(spin_glass_from_matrix(-J, -h)), SingleConfigMax())[].c.data)
# ╔═╡ 13f952ce-642a-4396-b574-00ea6584008c
md"### QUBO problem on a square lattice"
# ╔═╡ fcc22a84-011f-48ed-bc0b-41f4058b92fd
md"We define some coupling strengths and onsite energies on a $n \times n$ square lattice."
# ╔═╡ e7be21d1-971b-45fd-aa83-591d43262567
square_coupling = [[(i,j,i,j+1,0.01*randn()) for i=1:n, j=1:n-1]...,
[(i,j,i+1,j,0.01*randn()) for i=1:n-1, j=1:n]...];
# ╔═╡ 1702a65f-ad54-4520-b2d6-129c0576d708
square_onsite = vec([(i, j, 0.01*randn()) for i=1:n, j=1:n]);
# ╔═╡ 49ad22e7-e859-44d4-8179-e088e1159d04
md"Then we use `map_qubo_square` to reduce the QUBO problem on a square lattice to the MIS problem on a grid graph."
# ╔═╡ 32910090-9a42-475a-8e83-f9712f8fe551
qubo_square = UnitDiskMapping.map_qubo_square(square_coupling, square_onsite);
# ╔═╡ 7b5fcd3b-0f0a-44c3-9bf6-1dc042585322
show_grayscale(qubo_square.grid_graph)
# ╔═╡ 3ce74e3a-43f4-47a5-8dde-1d49e54e7eab
md"You can see each coupling is replaced by the following `XOR` gadget"
# ╔═╡ 8edabda9-c49b-407e-bae8-1a71a1fe19b4
show_grayscale(UnitDiskMapping.gadget_qubo_square(Int), texts=["x$('₀'+i)" for i=1:8])
# ╔═╡ 3ec7c034-4cb6-4b9f-96fb-c6dc428475bb
md"Where dark nodes have weight 2 and light nodes have weight 1. It corresponds to the boolean equation ``x_8 = \neg (x_1 \veebar x_5)``; hence we can add ferromagnetic couplings as negative weights and anti-ferromagnetic couplings as positive weights. On-site terms are added directly to the pins."
# ╔═╡ 494dfca2-af57-4dd9-9825-b28269641359
show_pins(qubo_square)
# ╔═╡ ca1d7917-58e2-4b7d-8671-ced548ccfe89
md"Let us solve the independent set problem on the mapped graph."
# ╔═╡ 30c33553-3b4d-4eff-b34c-7ac0579650f7
square_graph, square_weights = UnitDiskMapping.graph_and_weights(qubo_square.grid_graph);
# ╔═╡ 5c25abb7-e3ee-4104-9a82-eb4aa4e773d2
config_square = collect(Int, solve(GenericTensorNetwork(IndependentSet(square_graph, square_weights)), SingleConfigMax())[].c.data);
# ╔═╡ 4cec7232-8fbc-4ac1-96bb-6c7fea5fe117
md"We will get the following configuration."
# ╔═╡ 9bc9bd86-ffe3-48c1-81c0-c13f132e0dc1
show_config(qubo_square.grid_graph, config_square)
# ╔═╡ 9b0f051b-a107-41f2-b7b9-d6c673b7f93b
md"By reading out the configurations at pins, we can get a solution of the source QUBO problem."
# ╔═╡ d4c5240c-e70f-45f5-859f-1399c57511b0
r1 = map_config_back(qubo_square, config_square)
# ╔═╡ ffa9ad39-64e0-4655-b04e-23f57490d326
md"It can be easily checked by examining the exact result."
# ╔═╡ dfd4418e-19f0-42f2-87c5-69eacf2024ac
let
# solve QUBO directly
# Cross-check: rebuild the source spin glass on the n×n square lattice, mapping
# grid site (i, j) to vertex i + (j-1)*n, and solve it exactly with
# GenericTensorNetworks to compare against the unit-disk-mapped result above.
g2 = SimpleGraph(n*n)
Jd = Dict{Tuple{Int,Int}, Float64}()
for (i,j,i2,j2,J) in square_coupling
edg = (i+(j-1)*n, i2+(j2-1)*n)
Jd[edg] = J
add_edge!(g2, edg...)
end
# Collect couplings in the iteration order of `edges(g2)`. Keys were inserted
# with first < second (i2 ≥ i, j2 ≥ j above); NOTE(review): this assumes
# `edges(g2)` reports each edge with src < dst — confirm against Graphs.jl.
Js, hs = Float64[], zeros(Float64, nv(g2))
for e in edges(g2)
push!(Js, Jd[(e.src, e.dst)])
end
for (i,j,h) in square_onsite
hs[i+(j-1)*n] = h
end
# Signs of J and h are flipped here, matching the convention note in the
# generic QUBO section of this notebook.
collect(Int, solve(GenericTensorNetwork(SpinGlass(g2, -Js, -hs)), SingleConfigMax())[].c.data)
end
# ╔═╡ 9db831d6-7f10-47be-93d3-ebc892c4b3f2
md"## Factorization problem"
# ╔═╡ e69056dd-0052-4d1e-aef1-30411d416c82
md"The building block of the array multiplier can be mapped to the following gadget:"
# ╔═╡ 13e3525b-1b8e-4f65-8742-21d8ba4fdbe3
# Visualize the array-multiplier building block with its I/O pins labeled.
let
	graph, pins = UnitDiskMapping.multiplier()
	# Label the pins "x1" … "x8"; all other nodes get empty labels.
	texts = fill("", length(graph.nodes))
	texts[pins] .= ["x$i" for i=1:length(pins)]
	# Bug fix: `texts` was built but never passed, so the pin labels were
	# silently dropped from the plot (compare the `texts=` usage in the
	# square-lattice QUBO gadget cell above).
	show_grayscale(graph, texts=texts)
end
# ╔═╡ 025b38e1-d334-46b6-bf88-f7b426e8dc97
md"""
Let us denote the input and output pins as ``x_{1-8} \in \{0, 1\}``. The above gadget implements the following equations:
```math
\begin{align}
x_1 + x_2 x_3 + x_4 = x_5 + 2 x_7,\\
x_2 = x_6,\\
x_3 = x_8.
\end{align}
```
"""
# ╔═╡ 76ab8044-78ec-41b5-b11a-df4e7e009e64
md"One can call `map_factoring(M, N)` to map a factoring problem to the array multiplier grid of size (M, N). In the following example of (2, 2) array multiplier, the input integers are ``p = 2p_2+p_1`` and ``q= 2q_2+q_1``, and the output integer is ``m = 4m_3+2m_2+m_1``. The maximum independent set corresponds to the solution of ``pq = m``"
# ╔═╡ b3c5283e-15fc-48d6-b58c-b26d70e5f5a4
mres = UnitDiskMapping.map_factoring(2, 2);
# ╔═╡ adbae5f0-6fe9-4a97-816b-004e47b15593
show_pins(mres)
# ╔═╡ 2e13cbad-8110-4cbc-8890-ecbefe1302dd
md"To solve this factoring problem, one can use the following statement:"
# ╔═╡ e5da7214-0e69-4b5a-a65e-ed92d0616c71
multiplier_output = UnitDiskMapping.solve_factoring(mres, 6) do g, ws
collect(Int, solve(GenericTensorNetwork(IndependentSet(g, ws)), SingleConfigMax())[].c.data)
end
# ╔═╡ 9dc01591-5c37-4d83-b640-83280513941e
md"This function consists of the following steps:"
# ╔═╡ 41d9b8fd-dd18-4270-803f-bd6206845788
md"1. We first modify the graph by inspecting the fixed values, i.e., the output `m` and `0`s:
* If a vertex is fixed to 1, remove it and its neighbors,
* If a vertex is fixed to 0, remove this vertex.
The resulting grid graph is
"
# ╔═╡ b8b5aff0-2ed3-4237-9b9d-9eb0bf2f2878
mapped_grid_graph, remaining_vertices = let
g, ws = graph_and_weights(mres.grid_graph)
mg, vmap = UnitDiskMapping.set_target(g, [mres.pins_zeros..., mres.pins_output...], 6 << length(mres.pins_zeros))
GridGraph(mres.grid_graph.size, mres.grid_graph.nodes[vmap], mres.grid_graph.radius), vmap
end;
# ╔═╡ 97cf8ee8-dba3-4b0b-b0ba-97002bc0f028
show_graph(mapped_grid_graph)
# ╔═╡ 0a8cec9c-7b9d-445b-abe3-237f16fdd9ad
md"2. Then, we solve this new grid graph."
# ╔═╡ 57f7e085-9589-4a6c-ac14-488ea9924692
config_factoring6 = let
mg, mw = graph_and_weights(mapped_grid_graph)
solve(GenericTensorNetwork(IndependentSet(mg, mw)), SingleConfigMax())[].c.data
end;
# ╔═╡ 4c7d72f1-688a-4a70-8ce6-a4801127bb9a
show_config(mapped_grid_graph, config_factoring6)
# ╔═╡ 77bf7e4a-1237-4b24-bb31-dc8a30756834
md"3. It is straightforward to read out the results from the above configuration. The solution should be either (2, 3) or (3, 2)."
# ╔═╡ 5a79eba5-3031-4e21-836e-961a9d939862
let
cfg = zeros(Int, length(mres.grid_graph.nodes))
cfg[remaining_vertices] .= config_factoring6
bitvectors = cfg[mres.pins_input1], cfg[mres.pins_input2]
UnitDiskMapping.asint.(bitvectors)
end
# ╔═╡ 27c2ba44-fcee-4647-910e-ae16f430b87d
md"## Logic Gates"
# ╔═╡ d577e515-f3cf-4f27-b0b5-a94cb38abf1a
md"Let us define a helper function for visualization."
# ╔═╡ c17bca17-a00a-4118-a212-d21da09af9b5
parallel_show(gate) = leftright(show_pins(Gate(gate)), show_grayscale(gate_gadget(Gate(gate))[1], wmax=2));
# ╔═╡ 6aee2288-1934-4fc5-9a9c-f45b7ce4e767
md"1. NOT gate: ``y_1 =\neg x_1``"
# ╔═╡ fadded74-8a89-4348-88f6-50d12cde6234
parallel_show(:NOT)
# ╔═╡ 0b28fab8-eb04-46d9-aa19-82e4bab45eb9
md"2. NXOR gate: ``y_1 =\neg (x_1 \veebar x_2)``. Notice this negated XOR gate is used in the square lattice QUBO mapping."
# ╔═╡ 791b9fde-1df2-4239-8372-2e3dd36d6f34
parallel_show(:NXOR)
# ╔═╡ 60ef4369-831d-413e-bcc2-e088697b6ba4
md"3. NOR gate: ``y_1 =\neg (x_1 \vee x_2)``"
# ╔═╡ f46c3993-e01d-47fb-873a-c608e0d49d83
parallel_show(:NOR)
# ╔═╡ d3779618-f61f-4874-93f1-94e78bb21c94
md"4. AND gate: ``y_1 =x_1 \wedge x_2``"
# ╔═╡ 330a5f6c-601f-47e6-8294-e6af89818d7d
parallel_show(:AND)
# ╔═╡ 36173fe2-784f-472a-9cab-03f2a0a2b725
md"Since most logic gates have 3 pins, it is natural to embed a circuit to a 3D unit disk graph by taking the z direction as the time. In a 2D grid, one needs to do the general weighted mapping in order to create a unit disk boolean circuit."
# ╔═╡ Cell order:
# ╟─eac6ceda-f5d4-11ec-23db-b7b4d00eaddf
# ╟─2f721887-6dee-4b53-ae33-2c0a4b79ff37
# ╠═39bcea18-00b6-42ca-a1f2-53655f31fea7
# ╠═98459516-4833-4e4a-916f-d5ea3e657ceb
# ╟─bbe26162-1ab7-4224-8870-9504b7c3aecf
# ╟─b23f0215-8751-4105-aa7e-2c26e629e909
# ╠═7518d763-17a4-4c6e-bff0-941852ec1ccf
# ╠═0302be92-076a-4ebe-8d6d-4b352a77cfce
# ╟─417b18f6-6a8f-45fb-b979-6ec9d12c6246
# ╠═c7315578-8bb0-40a0-a2a3-685a80674c9c
# ╟─3f605eac-f587-40b2-8fac-8223777d3fad
# ╟─e5382b61-6387-49b5-bae8-0389fbc92153
# ╠═ae5c8359-6bdb-4a2a-8b54-cd2c7d2af4bd
# ╟─56bdcaa6-c8b9-47de-95d4-6e95204af0f2
# ╠═520fbc23-927c-4328-8dc6-5b98853fb90d
# ╠═af162d39-2da9-4a06-9cde-8306e811ba7a
# ╟─96ca41c0-ac77-404c-ada3-0cdc4a426e44
# ╠═5dfa8a74-26a5-45c4-a80c-47ba4a6a4ae9
# ╟─a64c2094-9a51-4c45-b9d1-41693c89a212
# ╠═52b904ad-6fb5-4a7e-a3db-ae7aff32be51
# ╟─ef828107-08ce-4d91-ba56-2b2c7862aa50
# ╠═acd7107c-c739-4ee7-b0e8-6383c54f714f
# ╟─94feaf1f-77ea-4d6f-ba2f-2f9543e8c1bd
# ╠═f084b98b-097d-4b33-a0d3-0d0a981f735e
# ╟─86457b4e-b83e-4bf5-9d82-b5e14c055b4b
# ╠═4abb86dd-67e2-46f4-ae6c-e97952b23fdc
# ╟─5ec5e23a-6904-41cc-b2dc-659da9556d20
# ╠═773ce349-ba72-426c-849d-cfb511773756
# ╠═7d921205-5133-40c0-bfa6-f76713dd4972
# ╟─3273f936-a182-4ed0-9662-26aab489776b
# ╟─5e4500f5-beb6-4ef9-bd42-41dc13b60bce
# ╠═2fa704ee-d5c1-4205-9a6a-34ba0195fecf
# ╟─27acc8be-2db8-4322-85b4-230fdddac043
# ╠═b8879b2c-c6c2-47e2-a989-63a00c645676
# ╟─1262569f-d091-40dc-a431-cbbe77b912ab
# ╠═d5a64013-b7cc-412b-825d-b9d8f0737248
# ╟─3c46e050-0f93-42af-a6ff-1a83e7d0f6da
# ╠═39cbb6fc-1c55-42dd-bbf6-54e06f5c7048
# ╠═41840a24-596e-4d93-9468-35329d57b0ce
# ╟─f77293c4-e5c3-4f14-95a2-ac9688fa3ba1
# ╠═cf910d3e-3e3c-42ef-acf3-d0990d6227ac
# ╠═d0648123-65fc-4dd7-8c0b-149b67920d8b
# ╟─fdc0fd6f-369e-4f1b-b105-672ae4229f02
# ╠═317839b5-3c30-401f-970c-231c204331b5
# ╠═beb7c0e5-6221-4f20-9166-2bd56902be1b
# ╟─cf7e88cb-432e-4e3a-ae8b-8fa12689e485
# ╟─d16a6f2e-1ae2-47f1-8496-db6963800fd2
# ╟─b5d95984-cf8d-4bce-a73a-8eb2a7c6b830
# ╠═2d1eb5cb-183d-4c4e-9a14-53fa08cbb156
# ╠═5ce3e8c9-e78e-4444-b502-e91b4bda5678
# ╠═828cf2a9-9178-41ae-86d3-e14d8c909c39
# ╟─09db490e-961a-4c64-bcc5-5c111bfd3b7a
# ╠═081d1eee-96b1-4e76-8b8c-c0d4e5bdbaed
# ╟─7974df7d-c390-4706-b7ba-6bde4409510d
# ╠═e6aeeeb4-704c-4ba4-abc2-29c4029e276d
# ╠═8467e950-7302-4930-8698-8e7b523556a6
# ╟─6976c82f-90f0-4091-b13d-af463fe75c8b
# ╠═95539e68-c1ea-4a6c-9406-2696d62b8461
# ╟─5282ca54-aa98-4d51-aaf9-af20eae5cc81
# ╠═ef149d9a-6aa9-4f34-b936-201b9d77543c
# ╠═4ea4f26e-746d-488e-9968-9fc584c04bcf
# ╟─b64500b6-99b6-497b-9096-4bab4ddbec8d
# ╠═cca6e2f8-69c5-4a3a-9f97-699b4868c4b9
# ╟─80757735-8e73-4cae-88d0-9fe3d3e539c0
# ╠═7dd900fc-9531-4bd6-8b6d-3aac3d5a2386
# ╟─13f952ce-642a-4396-b574-00ea6584008c
# ╟─fcc22a84-011f-48ed-bc0b-41f4058b92fd
# ╠═e7be21d1-971b-45fd-aa83-591d43262567
# ╠═1702a65f-ad54-4520-b2d6-129c0576d708
# ╟─49ad22e7-e859-44d4-8179-e088e1159d04
# ╠═32910090-9a42-475a-8e83-f9712f8fe551
# ╠═7b5fcd3b-0f0a-44c3-9bf6-1dc042585322
# ╟─3ce74e3a-43f4-47a5-8dde-1d49e54e7eab
# ╠═8edabda9-c49b-407e-bae8-1a71a1fe19b4
# ╟─3ec7c034-4cb6-4b9f-96fb-c6dc428475bb
# ╠═494dfca2-af57-4dd9-9825-b28269641359
# ╟─ca1d7917-58e2-4b7d-8671-ced548ccfe89
# ╠═30c33553-3b4d-4eff-b34c-7ac0579650f7
# ╠═5c25abb7-e3ee-4104-9a82-eb4aa4e773d2
# ╟─4cec7232-8fbc-4ac1-96bb-6c7fea5fe117
# ╠═9bc9bd86-ffe3-48c1-81c0-c13f132e0dc1
# ╟─9b0f051b-a107-41f2-b7b9-d6c673b7f93b
# ╠═d4c5240c-e70f-45f5-859f-1399c57511b0
# ╟─ffa9ad39-64e0-4655-b04e-23f57490d326
# ╠═dfd4418e-19f0-42f2-87c5-69eacf2024ac
# ╟─9db831d6-7f10-47be-93d3-ebc892c4b3f2
# ╟─e69056dd-0052-4d1e-aef1-30411d416c82
# ╠═13e3525b-1b8e-4f65-8742-21d8ba4fdbe3
# ╟─025b38e1-d334-46b6-bf88-f7b426e8dc97
# ╟─76ab8044-78ec-41b5-b11a-df4e7e009e64
# ╠═b3c5283e-15fc-48d6-b58c-b26d70e5f5a4
# ╠═adbae5f0-6fe9-4a97-816b-004e47b15593
# ╟─2e13cbad-8110-4cbc-8890-ecbefe1302dd
# ╠═e5da7214-0e69-4b5a-a65e-ed92d0616c71
# ╟─9dc01591-5c37-4d83-b640-83280513941e
# ╟─41d9b8fd-dd18-4270-803f-bd6206845788
# ╠═b8b5aff0-2ed3-4237-9b9d-9eb0bf2f2878
# ╠═97cf8ee8-dba3-4b0b-b0ba-97002bc0f028
# ╟─0a8cec9c-7b9d-445b-abe3-237f16fdd9ad
# ╠═57f7e085-9589-4a6c-ac14-488ea9924692
# ╠═4c7d72f1-688a-4a70-8ce6-a4801127bb9a
# ╟─77bf7e4a-1237-4b24-bb31-dc8a30756834
# ╠═5a79eba5-3031-4e21-836e-961a9d939862
# ╟─27c2ba44-fcee-4647-910e-ae16f430b87d
# ╟─d577e515-f3cf-4f27-b0b5-a94cb38abf1a
# ╠═c17bca17-a00a-4118-a212-d21da09af9b5
# ╟─6aee2288-1934-4fc5-9a9c-f45b7ce4e767
# ╠═fadded74-8a89-4348-88f6-50d12cde6234
# ╟─0b28fab8-eb04-46d9-aa19-82e4bab45eb9
# ╠═791b9fde-1df2-4239-8372-2e3dd36d6f34
# ╟─60ef4369-831d-413e-bcc2-e088697b6ba4
# ╠═f46c3993-e01d-47fb-873a-c608e0d49d83
# ╟─d3779618-f61f-4874-93f1-94e78bb21c94
# ╠═330a5f6c-601f-47e6-8294-e6af89818d7d
# ╟─36173fe2-784f-472a-9cab-03f2a0a2b725
| UnitDiskMapping | https://github.com/QuEraComputing/UnitDiskMapping.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 0736fd885e445c13b7b58e31d1ccc37a4950f7df | code | 8574 | ### A Pluto.jl notebook ###
# v0.19.42
using Markdown
using InteractiveUtils
# ╔═╡ f55dbf80-8425-11ee-2e7d-4d1ad4f693af
# ╠═╡ show_logs = false
begin
using Pkg; Pkg.activate(".")
using Revise
using PlutoUI
# left right layout
function leftright(a, b; width=600)
HTML("""
<style>
table.nohover tr:hover td {
background-color: white !important;
}</style>
<table width=$(width)px class="nohover" style="border:none">
<tr>
<td>$(html(a))</td>
<td>$(html(b))</td>
</tr></table>
""")
end
# up down layout
function updown(a, b; width=nothing)
HTML("""<table class="nohover" style="border:none" $(width === nothing ? "" : "width=$(width)px")>
<tr>
<td>$(html(a))</td>
</tr>
<tr>
<td>$(html(b))</td>
</tr></table>
""")
end
PlutoUI.TableOfContents()
end
# ╔═╡ be011e30-74e6-49cd-b45a-288972dc5f18
using UnitDiskMapping, Graphs # for mapping graphs to a King's subgraph (KSG)
# ╔═╡ 31250cb9-6f3a-429a-975d-752cb7c07883
using GenericTensorNetworks # for solving the maximum independent sets
# ╔═╡ 9017a42c-9791-4933-84a4-9ff509967323
md"""
# Unweighted KSG reduction of the independent set problem
"""
# ╔═╡ f0e7c030-4e43-4356-a5bb-717a7f382a17
md"""This notebook contains examples from the paper, "Computer-Assisted Gadget Design and Problem Reduction of Unweighted Maximum Independent Set"."""
# ╔═╡ cb4a9655-6df2-46b3-8969-8b6f2db7c59a
md"""
## Example 1: The 5-vertex graph
The five vertex demo graph in the paper.
"""
# ╔═╡ 956a5c3a-b8c6-4040-9553-3b4e2337b163
md"#### Step 1: Prepare a source graph."
# ╔═╡ d858f57e-1706-4b73-bc23-53f7af073b0c
# the demo graph in the main text
function demograph()
    # Five-vertex example graph from the paper's main text.
    graph = SimpleGraph(5)
    foreach(((src, dst),) -> add_edge!(graph, src, dst),
            [(1, 2), (2, 4), (3, 4), (1, 3), (4, 5), (1, 5)])
    return graph
end
# ╔═╡ a3a86c62-ee6e-4a3b-99b3-c484de3b5220
g5 = demograph()
# ╔═╡ e6170e72-0804-401e-b9e5-65b8ee7d7edb
show_graph(g5)
# ╔═╡ 625bdcf4-e37e-4bb8-bd1a-907cdcc5fe24
md"""
#### Step 2: Map the source graph to an unweighted King's subgraph (KSG)
The vertex order is optimized with the Branching path decomposition algorithm (MinhThi's Trick)
"""
# ╔═╡ f9e57a6b-1186-407e-a8b1-cb8f31a17bd2
g5res = UnitDiskMapping.map_graph(g5; vertex_order=MinhThiTrick())
# ╔═╡ e64e7ca4-b297-4c74-8699-bec4b4fbb843
md"Visualize the mapped KSG graph in terminal"
# ╔═╡ 0a860597-0610-48f6-b1ee-711939712de4
print(g5res.grid_graph)
# ╔═╡ eeae7074-ee21-44fc-9605-3555acb84cee
md"or in a plotting plane"
# ╔═╡ 3fa6052b-74c2-453d-a473-68f4b3ca0490
show_graph(g5res.grid_graph)
# ╔═╡ 942e8dfb-b89d-4f2d-b1db-f4636d4e5de6
md"#### Step 3: Solve the MIS size of the mapped graph"
# ╔═╡ 766018fa-81bd-4c37-996a-0cf77b0909af
md"The independent set size can be obtained by solving the `SizeMax()` property using the [generic tensor network](https://github.com/QuEraComputing/GenericTensorNetworks.jl) method."
# ╔═╡ 67fd2dd2-5add-4402-9618-c9b7c7bfe95b
missize_g5_ksg = solve(GenericTensorNetwork(IndependentSet(SimpleGraph(g5res.grid_graph))), SizeMax())[]
# ╔═╡ aaee9dbc-5b9c-41b1-b0d4-35d2cac7c773
md"The predicted MIS size for the source graph is:"
# ╔═╡ 114e2c42-aaa3-470b-a267-e5a7c6b08607
missize_g5_ksg.n - g5res.mis_overhead
# ╔═╡ e6fa2404-cbe9-4f9b-92a0-0d6fdb649c44
md"""
One of the best solutions can be obtained by solving the `SingleConfigMax()` property.
"""
# ╔═╡ 0142f661-0855-45b4-852a-78f560e98c67
mis_g5_ksg = solve(GenericTensorNetwork(IndependentSet(SimpleGraph(g5res.grid_graph))), SingleConfigMax())[].c.data
# ╔═╡ fa046f3c-fd7d-4e91-b3f5-fc4591d3cae2
md"Plot the solution"
# ╔═╡ 0cbcd2a6-b8ae-47ff-8541-963b9dae700a
show_config(g5res.grid_graph, mis_g5_ksg)
# ╔═╡ 4734dc0b-0770-4f84-8046-95a74104936f
md"#### Step 4: Map the KSG solution back"
# ╔═╡ 0f27de9f-2e06-4d5e-b96f-b7c7fdadabca
md"In the following, we will show how to obtain an MIS of the source graph from that of its KSG reduction."
# ╔═╡ fc968df0-832b-44c9-8335-381405b92199
mis_g5 = UnitDiskMapping.map_config_back(g5res, collect(mis_g5_ksg))
# ╔═╡ 29458d07-b2b2-49af-a696-d0cb0ad35481
md"Show that the overhead in the MIS size is correct"
# ╔═╡ fa4888b2-fc67-4285-8305-da655c42a898
md"Verify the result:"
# ╔═╡ e84102e8-d3f2-4f91-87be-dba8e81462fb
# the extracted solution is an independent set
UnitDiskMapping.is_independent_set(g5, mis_g5)
# ╔═╡ 88ec52b3-73fd-4853-a69b-442f5fd2e8f7
# and its size is maximized
count(isone, mis_g5)
# ╔═╡ 5621bb2a-b1c6-4f0d-921e-980b2ce849d5
solve(GenericTensorNetwork(IndependentSet(g5)), SizeMax())[].n
# ╔═╡ 1fe6c679-2962-4c1b-8b12-4ceb77ed9e0f
md"""
## Example 2: The Petersen graph
We just quickly go through a second example, the Petersen graph.
"""
# ╔═╡ ea379863-95dd-46dd-a0a3-0a564904476a
petersen = smallgraph(:petersen)
# ╔═╡ d405e7ec-50e3-446c-8d19-18f1a66c1e3b
show_graph(petersen)
# ╔═╡ 409b03d1-384b-48d3-9010-8079cbf66dbf
md"We first map it to a grid graph (unweighted)."
# ╔═╡ a0e7da6b-3b71-43d4-a1da-f1bd953e4b50
petersen_res = UnitDiskMapping.map_graph(petersen)
# ╔═╡ 4f1f0ca0-dd2a-4768-9b4e-80813c9bb544
md"The MIS size of the petersen graph is 4."
# ╔═╡ bf97a268-cd96-4dbc-83c6-10eb1b03ddcc
missize_petersen = solve(GenericTensorNetwork(IndependentSet(petersen)), SizeMax())[]
# ╔═╡ 2589f112-5de5-4c98-bcd1-138b6143cd30
md" The MIS size of the mapped KSG graph is much larger"
# ╔═╡ 1b946455-b152-4d6f-9968-7dc6e22d171a
missize_petersen_ksg = solve(GenericTensorNetwork(IndependentSet(SimpleGraph(petersen_res.grid_graph))), SizeMax())[]
# ╔═╡ 4e7f7d9e-fae4-46d2-b95d-110d36b691d9
md"The difference in the MIS size is:"
# ╔═╡ d0e49c1f-457d-4b61-ad0e-347afb029114
petersen_res.mis_overhead
# ╔═╡ 03d8adb3-0bf4-44e6-9b0a-fffc90410cfc
md"Find an MIS of the mapped KSG and map it back an MIS on the source graph."
# ╔═╡ 0d08cb1a-f7f3-4d63-bd70-78103db086b3
mis_petersen_ksg = solve(GenericTensorNetwork(IndependentSet(SimpleGraph(petersen_res.grid_graph))), SingleConfigMax())[].c.data
# ╔═╡ c27d8aed-c81f-4eb7-85bf-a4ed88c2537f
mis_petersen = UnitDiskMapping.map_config_back(petersen_res, collect(mis_petersen_ksg))
# ╔═╡ 20f81eef-12d3-4f2a-9b91-ccf2705685ad
md"""The obtained solution is an independent set and its size is maximized."""
# ╔═╡ 0297893c-c978-4818-aae8-26e60d8c9e9e
UnitDiskMapping.is_independent_set(petersen, mis_petersen)
# ╔═╡ 5ffe0e4f-bd2c-4d3e-98ca-61673a7e5230
count(isone, mis_petersen)
# ╔═╡ 8c1d46e8-dc36-41bd-9d9b-5a72c380ef26
md"The number printed should be consistent with the MIS size of the petersen graph."
# ╔═╡ Cell order:
# ╟─f55dbf80-8425-11ee-2e7d-4d1ad4f693af
# ╟─9017a42c-9791-4933-84a4-9ff509967323
# ╟─f0e7c030-4e43-4356-a5bb-717a7f382a17
# ╠═be011e30-74e6-49cd-b45a-288972dc5f18
# ╠═31250cb9-6f3a-429a-975d-752cb7c07883
# ╟─cb4a9655-6df2-46b3-8969-8b6f2db7c59a
# ╟─956a5c3a-b8c6-4040-9553-3b4e2337b163
# ╠═d858f57e-1706-4b73-bc23-53f7af073b0c
# ╠═a3a86c62-ee6e-4a3b-99b3-c484de3b5220
# ╠═e6170e72-0804-401e-b9e5-65b8ee7d7edb
# ╟─625bdcf4-e37e-4bb8-bd1a-907cdcc5fe24
# ╠═f9e57a6b-1186-407e-a8b1-cb8f31a17bd2
# ╟─e64e7ca4-b297-4c74-8699-bec4b4fbb843
# ╠═0a860597-0610-48f6-b1ee-711939712de4
# ╟─eeae7074-ee21-44fc-9605-3555acb84cee
# ╠═3fa6052b-74c2-453d-a473-68f4b3ca0490
# ╟─942e8dfb-b89d-4f2d-b1db-f4636d4e5de6
# ╟─766018fa-81bd-4c37-996a-0cf77b0909af
# ╠═67fd2dd2-5add-4402-9618-c9b7c7bfe95b
# ╟─aaee9dbc-5b9c-41b1-b0d4-35d2cac7c773
# ╠═114e2c42-aaa3-470b-a267-e5a7c6b08607
# ╟─e6fa2404-cbe9-4f9b-92a0-0d6fdb649c44
# ╠═0142f661-0855-45b4-852a-78f560e98c67
# ╟─fa046f3c-fd7d-4e91-b3f5-fc4591d3cae2
# ╠═0cbcd2a6-b8ae-47ff-8541-963b9dae700a
# ╟─4734dc0b-0770-4f84-8046-95a74104936f
# ╟─0f27de9f-2e06-4d5e-b96f-b7c7fdadabca
# ╠═fc968df0-832b-44c9-8335-381405b92199
# ╟─29458d07-b2b2-49af-a696-d0cb0ad35481
# ╟─fa4888b2-fc67-4285-8305-da655c42a898
# ╠═e84102e8-d3f2-4f91-87be-dba8e81462fb
# ╠═88ec52b3-73fd-4853-a69b-442f5fd2e8f7
# ╠═5621bb2a-b1c6-4f0d-921e-980b2ce849d5
# ╟─1fe6c679-2962-4c1b-8b12-4ceb77ed9e0f
# ╠═ea379863-95dd-46dd-a0a3-0a564904476a
# ╠═d405e7ec-50e3-446c-8d19-18f1a66c1e3b
# ╟─409b03d1-384b-48d3-9010-8079cbf66dbf
# ╠═a0e7da6b-3b71-43d4-a1da-f1bd953e4b50
# ╟─4f1f0ca0-dd2a-4768-9b4e-80813c9bb544
# ╠═bf97a268-cd96-4dbc-83c6-10eb1b03ddcc
# ╟─2589f112-5de5-4c98-bcd1-138b6143cd30
# ╠═1b946455-b152-4d6f-9968-7dc6e22d171a
# ╟─4e7f7d9e-fae4-46d2-b95d-110d36b691d9
# ╠═d0e49c1f-457d-4b61-ad0e-347afb029114
# ╟─03d8adb3-0bf4-44e6-9b0a-fffc90410cfc
# ╠═0d08cb1a-f7f3-4d63-bd70-78103db086b3
# ╠═c27d8aed-c81f-4eb7-85bf-a4ed88c2537f
# ╟─20f81eef-12d3-4f2a-9b91-ccf2705685ad
# ╠═0297893c-c978-4818-aae8-26e60d8c9e9e
# ╠═5ffe0e4f-bd2c-4d3e-98ca-61673a7e5230
# ╟─8c1d46e8-dc36-41bd-9d9b-5a72c380ef26
| UnitDiskMapping | https://github.com/QuEraComputing/UnitDiskMapping.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 0736fd885e445c13b7b58e31d1ccc37a4950f7df | code | 2584 | using UnitDiskMapping, GenericTensorNetworks, Graphs
"""
    mapped_entry_to_compact(s::Pattern)

Build a lookup from every 0-based boundary (pin) bit-string of the mapped graph
of pattern `s` to the "compact" entry that covers it.
The boundary MIS-size tensor `a` is computed with the pins as open vertices;
`mis_compactify!` zeroes redundant entries in a copy `b`. Every zeroed entry is
mapped to a surviving entry with the same MIS size whose pin bit-string is a
subset of its own; surviving entries map to themselves.
"""
function mapped_entry_to_compact(s::Pattern)
    locs, g, pins = mapped_graph(s)
    a = solve(IndependentSet(g; openvertices=pins), SizeMax())
    b = mis_compactify!(copy(a))
    n = length(a)
    d = Dict{Int,Int}() # the mapping from compactified entries to surviving ones
    for i=1:n
        val_a = a[i]
        if iszero(b[i]) && !iszero(val_a)
            # entry i was compactified away: find the surviving entry covering it
            bs_a = i-1
            for j=1:n # search for the entry b[j] that compactifies a[i]
                bs_b = j-1
                # same MIS size, and bs_b is a bitwise subset of bs_a
                if b[j] == val_a && (bs_b & bs_a) == bs_b # found it
                    d[bs_a] = bs_b
                    break
                end
            end
        else
            # entry survives compactification (or was already zero): identity
            d[i-1] = i-1
        end
    end
    return d
end
# from mapped graph boundary configuration to compact boundary configuration
"""
    source_entry_to_configs(s::Pattern)

For each 0-based boundary (pin) bit-string of the *source* graph of pattern
`s`, collect all maximum independent set configurations as `BitVector`s over
the source-graph vertices.
"""
function source_entry_to_configs(s::Pattern)
    locs, g, pins = source_graph(s)
    a = solve(IndependentSet(g, openvertices=pins), ConfigsMax())
    d = Dict{Int,Vector{BitVector}}() # boundary bit-string => optimal configurations
    for i=1:length(a)
        d[i-1] = [BitVector(s) for s in a[i].c.data]
    end
    return d
end
"""
    compute_mis_overhead(s)

Return the constant offset between the (compactified) boundary MIS-size
tensors of the source and mapped graphs of gadget `s`.
Asserts that the two tensors differ entry-wise by the same constant, i.e. the
gadget preserves the MIS structure up to a fixed overhead.
"""
function compute_mis_overhead(s)
    locs1, g1, pins1 = source_graph(s)
    locs2, g2, pins2 = mapped_graph(s)
    m1 = mis_compactify!(solve(IndependentSet(g1, openvertices=pins1), SizeMax()))
    m2 = mis_compactify!(solve(IndependentSet(g2, openvertices=pins2), SizeMax()))
    # sanity check: one location per vertex in both graphs
    @assert nv(g1) == length(locs1) && nv(g2) == length(locs2)
    sig, diff = UnitDiskMapping.is_diff_by_const(GenericTensorNetworks.content.(m1), GenericTensorNetworks.content.(m2))
    @assert sig
    return diff
end
# from boundary configuration to MISs.
"""
    generate_mapping(s::Pattern)

Emit Julia source code defining `mapped_entry_to_compact`,
`source_entry_to_configs` and `mis_overhead` methods specialized on
`typeof(s)`, with the precomputed tables interpolated as literals.
Returns the generated code as a `String`.
"""
function generate_mapping(s::Pattern)
    d1 = mapped_entry_to_compact(s)
    d2 = source_entry_to_configs(s)
    diff = compute_mis_overhead(s)
    # NOTE: `s` is rebound below from the Pattern to the generated code string,
    # which is also the return value.
    s = """function mapped_entry_to_compact(::$(typeof(s)))
    return Dict($(collect(d1)))
end
function source_entry_to_configs(::$(typeof(s)))
    return Dict($(collect(d2)))
end
mis_overhead(::$(typeof(s))) = $(-Int(diff))
"""
end
# Write the generated lookup-table methods for all `patterns` into `filename`,
# prefixed by a do-not-edit header.
function dump_mapping_to_julia(filename, patterns)
    header = "# Do not modify this file, because it is automatically generated by `project/createmap.jl`\n\n"
    body = join(map(generate_mapping, patterns), "\n\n")
    open(io -> write(io, header * body), filename, "w")
end
# Regenerate `src/extracting_results.jl` with lookup tables for the full set of
# crossing and simplifier gadgets.
dump_mapping_to_julia(joinpath(@__DIR__, "..", "src", "extracting_results.jl"),
    (Cross{false}(), Cross{true}(),
    Turn(), WTurn(), Branch(), BranchFix(), TrivialTurn(), TCon(), BranchFixB(),
    EndTurn(),
    UnitDiskMapping.simplifier_ruleset...))
| UnitDiskMapping | https://github.com/QuEraComputing/UnitDiskMapping.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 0736fd885e445c13b7b58e31d1ccc37a4950f7df | code | 2912 | using UnitDiskMapping, UnitDiskMapping.TikzGraph
using UnitDiskMapping: safe_get
using GenericTensorNetworks, Graphs
"""
    all_configurations(p::Pattern)

Enumerate the maximum independent set configurations of the mapped graph of
pattern `p` over all boundary (pin) conditions, and return them as 0/1 matrices
of the pattern's grid size, deduplicated up to the symmetries of the square
(rotations, reflections and transposes).
"""
function all_configurations(p::Pattern)
    mlocs, mg, mpins = mapped_graph(p)
    gp = IndependentSet(mg, openvertices=mpins)
    res = solve(gp, ConfigsMax())
    # collect the optimal configurations of every boundary condition
    configs = []
    for element in res
        for bs in element.c.data
            push!(configs, bs)
        end
    end
    mats = Matrix{Int}[]
    for config in configs
        m, n = size(p)
        mat = zeros(Int, m, n)
        for (loc, c) in zip(mlocs, config)
            if !iszero(c)
                mat[loc.x, loc.y] = true
            end
        end
        # keep only one representative per symmetry class; the identity check
        # (`mat ∉ mats`) also drops exact duplicates
        if mat ∉ mats &&
            rotr90(mat) ∉ mats && rotl90(mat) ∉ mats && rot180(mat) ∉ mats &&
            mat[end:-1:1,:] ∉ mats && mat[:,end:-1:1] ∉ mats && # reflect x, y
            Matrix(mat') ∉ mats && mat[end:-1:1,end:-1:1] ∉ mats # reflect diag, offdiag
            push!(mats, mat)
        end
    end
    return mats
end
"""
    viz_matrix!(c, mat0, mat, dx, dy)

Draw the 0/1 configuration `mat` on canvas `c` at offset `(dx, dy)`, using the
pattern cell matrix `mat0` to decide how each grid cell is rendered.
Cells flagged `doubled` are drawn as two nodes shifted by 0.4 in x and y.
"""
function viz_matrix!(c, mat0, mat, dx, dy)
    m, n = size(mat)
    for i=1:m
        for j=1:n
            x, y = j+dx, i+dy  # tikz x runs along matrix columns
            cell = mat0[i,j]
            if isempty(cell)
                # empty cells are not drawn
                #Node(x, y; fill="black", minimum_size="0.01cm") >> c
            elseif cell.doubled
                # each satellite node is filled (black) iff both configuration
                # entries neighboring it along that axis are zero
                filled = iszero(safe_get(mat, i,j-1)) && iszero(safe_get(mat,i,j+1))
                Node(x+0.4, y; fill=filled ? "black" : "white", minimum_size="0.4cm") >> c
                filled = iszero(safe_get(mat, i-1,j)) && iszero(safe_get(mat,i+1,j))
                Node(x, y+0.4; fill=filled ? "black" : "white", minimum_size="0.4cm") >> c
            else
                # ordinary cell: black when selected in the configuration
                Node(x, y; fill=mat[i,j] > 0 ? "black" : "white", minimum_size="0.4cm") >> c
            end
        end
    end
end
"""
    vizback(p; filename)

Visualize how mapped-graph MIS configurations of pattern `p` map back to
source-graph configurations. Each symmetry-inequivalent mapped configuration
is drawn next to the source configuration obtained via `map_config_back!`,
two pairs per row, and the resulting canvas is written to `filename`.
"""
function vizback(p; filename)
    configs = all_configurations(p)
    smat = source_matrix(p)
    mmat = mapped_matrix(p)
    slocs, sg, spins = source_graph(p)
    mlocs, mg, mpins = mapped_graph(p)
    m, n = size(p)
    img = canvas() do c
        for (ic, mconfig) in enumerate(configs)
            # lay pairs out in two columns: even-indexed entries on the right
            if ic % 2 == 0
                dx = 2n+3
            else
                dx = 0
            end
            dy = -(m+1)*((ic-1) ÷ 2 -1)
            Mesh(1+dx, n+dx, 1+dy, m+dy) >> c
            viz_bonds!(c, mlocs, mg, dx, dy)
            viz_matrix!(c, mmat, mconfig, dx, dy)
            PlainText(n+1+dx, m/2+0.5+dy, "\$\\rightarrow\$") >> c
            # map the mapped-graph configuration back onto the source graph
            sconfig = UnitDiskMapping.map_config_back!(p, 1, 1, mconfig)
            dx += n+1
            Mesh(1+dx, n+dx, 1+dy, m+dy) >> c
            viz_bonds!(c, slocs, sg, dx, dy)
            viz_matrix!(c, smat, sconfig, dx, dy)
        end
    end
    writepdf(filename, img)
end
# Draw one 2pt line segment per edge of `g`, connecting the (row, col)
# locations in `locs` shifted by `(dx, dy)`. Note the coordinate swap:
# tikz x follows the matrix column.
function viz_bonds!(c, locs, g, dx, dy)
    for edge in edges(g)
        p = locs[edge.src]
        q = locs[edge.dst]
        Line((p.y + dx, p.x + dy), (q.y + dx, q.x + dy); line_width="2pt") >> c
    end
end
| UnitDiskMapping | https://github.com/QuEraComputing/UnitDiskMapping.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 0736fd885e445c13b7b58e31d1ccc37a4950f7df | code | 1086 | using UnitDiskMapping.TikzGraph
# A rounded-rectangle "process" box for the flowchart, centered at
# `(centerx, centery)` with the given size (in cm) and label `text`.
function routine(centerx, centery, width, height, text; kwargs...)
    Node(centerx, centery, shape="rectangle", rounded_corners = "0.5ex", minimum_width="$(width)cm", minimum_height="$(height)cm", line_width="1pt", minimum_size="", text=text, kwargs...)
end
# aspect = width/height
# A diamond "decision" node for the flowchart; tikz sizes a diamond by its
# `minimum_size` (height) and the `aspect` ratio.
function decision(centerx, centery, width, height, text; kwargs...)
    Node(centerx, centery, shape="diamond", minimum_size="$(height)cm", aspect=width/height, text=text, line_width="1pt", kwargs...)
end
# A directed connection between flowchart nodes: a 1pt line with an `->` tip.
arrow(nodes...; kwargs...) = Line(nodes...; arrow="->", line_width="1pt", kwargs...)
# Assemble the reduction flowchart: input graph → "satisfies the crossing
# criteria" decision → "has a same tropical tensor" decision, with the false
# branch looping back to the input node.
function draw_flowchart()
    canvas(libs=["shapes"]) do c
        n1 = routine(0.0, 0.0, 3, 1, "input graph") >> c
        n2 = decision(0.0, -3, 3, 1, "satisfies the crossing criteria") >> c
        n3 = decision(0.0, -6, 3, 1, "has a same tropical tensor") >> c
        arrow(n1, n2) >> c
        arrow(n2, n3; annotate="T") >> c
        # false branch: route back to the input node via two waypoints
        arrow(n2, (3.0, -3.0), (3.0, 0.0), n1; annotate="F") >> c
    end
end
writepdf("_local/flowchart.tex", draw_flowchart()) | UnitDiskMapping | https://github.com/QuEraComputing/UnitDiskMapping.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 0736fd885e445c13b7b58e31d1ccc37a4950f7df | code | 1926 | using UnitDiskMapping.TikzGraph, Graphs
using UnitDiskMapping: crossing_ruleset, Pattern, source_graph, mapped_graph
# Render the vertices and edges of a gadget graph onto `canvas` at offset
# `(dx, dy)`. Vertex `i` gets tikz id "$name$i" so edges can reference it;
# pins are drawn red, other vertices black, with radius `r` (cm).
# A location appearing twice in `locs` marks a doubled site: an invisible
# anchor plus two satellite nodes shifted by 0.4 in x and y.
function command_graph!(canvas, locs, graph, pins, dx, dy, r, name)
    for (i,loc) in enumerate(locs)
        if count(==(loc), locs) == 2
            Node(loc[1]+dx, loc[2]+dy, fill="black", draw="none", id="$name$i", minimum_size="0cm") >> canvas
            Node(loc[1]+dx+0.4, loc[2]+dy, fill="black", draw="none", id="$name$i-A", minimum_size="$(r)cm") >> canvas
            Node(loc[1]+dx, loc[2]+dy+0.4, fill="black", draw="none", id="$name$i-B", minimum_size="$(r)cm") >> canvas
        else
            Node(loc[1]+dx, loc[2]+dy, fill=i∈pins ? "red" : "black", draw="none", id="$name$i", minimum_size="$(r)cm") >> canvas
        end
    end
    for e in edges(graph)
        Line("$name$(e.src)", "$name$(e.dst)"; line_width=1.0) >> canvas
    end
end
# Draw one gadget rule as "source grid → mapped grid" side by side and return
# the tikz canvas.
function viz_gadget(p::Pattern)
    locs1, g1, pin1 = source_graph(p)
    locs2, g2, pin2 = mapped_graph(p)
    Gy, Gx = size(p)
    # convert matrix indices (row, col) to Cartesian (x, y) with y flipped
    locs1 = map(l->(l[2]-1, Gy-l[1]), locs1)
    locs2 = map(l->(l[2]-1, Gy-l[1]), locs2)
    Wx, Wy = 11, Gy
    xmid, ymid = Wx/2-0.5, Wy/2-0.5
    dx1, dy1 = xmid-Gx, 0  # source grid left of center
    dx2, dy2 = xmid+1, 0   # mapped grid right of center
    return canvas(; props=Dict("scale"=>"0.8")) do c
        BoundingBox(-1,Wx-1,-1,Wy-1) >> c
        Mesh(dx1, Gx+dx1-1, dy1, Gy+dy1-1; step="1cm", draw=rgbcolor!(c, 200,200,200), line_width=0.5) >> c
        command_graph!(c, locs1, g1, pin1, dx1, dy1, 0.3, "s")
        Mesh(dx2, Gx+dx2-1, dy2, Gy+dy2-1; step="1cm", draw=rgbcolor!(c, 200,200,200), line_width=0.03) >> c
        command_graph!(c, locs2, g2, pin2, dx2, dy2, 0.3, "d")
        # arrow between the two grids
        PlainText(xmid, ymid, "\$\\mathbf{\\rightarrow}\$") >> c
    end
end
# Write one tex/pdf file per crossing rule into `folder`, named after the
# pattern's type (e.g. "Cross-udg.tex").
function pattern2tikz(folder::String)
    for p in crossing_ruleset
        writepdf(joinpath(folder, string(typeof(p).name.name)*"-udg.tex"), viz_gadget(p))
    end
end
pattern2tikz(joinpath("_local")) | UnitDiskMapping | https://github.com/QuEraComputing/UnitDiskMapping.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 0736fd885e445c13b7b58e31d1ccc37a4950f7df | code | 1092 | using UnitDiskMapping, Graphs, Random
using Comonicon
using DelimitedFiles
include("readgraphs.jl")
# map all connected non-isomorphic graphs
# CLI command: map every connected graph from the precomputed `data/graph$n.g6`
# atlas to a unit-disk grid graph. The result is discarded — this command only
# exercises the mapping (smoke test / timing run).
@cast function mapall(n::Int)
    graphs = load_g6(joinpath(dirname(@__DIR__), "data", "graph$n.g6"))
    for g in graphs
        if is_connected(g)
            result = map_graph(g)
        end
    end
end
# CLI command: for random graphs of sizes 10, 20, ..., 100 of the given family
# ("Erdos-Renyi" or "3-Regular"), map each to a grid graph with the greedy
# vertex order and record the mapped graph sizes to
# `data/$graphname-$seed.dat` (one integer per line).
@cast function sample(graphname::String; seed::Int=2)
    folder = joinpath(@__DIR__, "data")
    # `mkpath` is a no-op when the directory already exists
    mkpath(folder)
    # validate the family up front instead of failing inside the loop
    graphname in ("Erdos-Renyi", "3-Regular") || error("graph name $graphname not defined!")
    sizes = 10:10:100
    Random.seed!(seed)
    res_sizes = Int[]
    for n in sizes
        g = graphname == "Erdos-Renyi" ? erdos_renyi(n, 0.3) : random_regular_graph(n, 3)
        res = map_graph(g; vertex_order=Greedy())
        m = length(res.grid_graph.nodes)
        @info "size $n, mapped graph size $m."
        push!(res_sizes, m)
    end
    writedlm(joinpath(folder, "$graphname-$seed.dat"), res_sizes)
end
@main
| UnitDiskMapping | https://github.com/QuEraComputing/UnitDiskMapping.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 0736fd885e445c13b7b58e31d1ccc37a4950f7df | code | 1400 | ############################ G6 graphs ###########################
# NOTE: this script is copied from GraphIO.jl
# Decode a graph6 record into `(nv, edge_list)`, where the edge list holds
# `[src, dst]` pairs with `src < dst`.
function _g6StringToGraph(s::AbstractString)
    bytes = Vector{UInt8}(s)
    nvertices, payload = _g6_Np(bytes)
    bits = _g6_Rp(payload)
    # the payload stores the strict upper triangle of the adjacency matrix,
    # column by column
    edgelist = Vector{Int}[]
    k = 0
    for col in 2:nvertices
        for row in 1:(col - 1)
            k += 1
            bits[k] && push!(edgelist, [row, col])
        end
    end
    return nvertices, edgelist
end
# Decode a graph6 payload: each byte carries 6 bits, offset by 63, stored
# big-endian within each 6-bit group. The result is preallocated instead of
# grown with repeated `vcat`, which was O(n^2) in the number of bytes.
function _g6_Rp(bytevec::Vector{UInt8})
    bits = BitVector(undef, 6 * length(bytevec))
    for (k, byte) in enumerate(bytevec)
        val = Int(byte) - 63
        base = 6 * (k - 1)
        for b in 1:6
            bits[base + b] = isodd(val >> (6 - b))
        end
    end
    return bits
end
# Big-endian `k`-bit representation of `n` as a `BitVector`
# (bit 1 is the most significant of the `k` bits).
function _int2bv(n::Int, k::Int)
    bv = falses(k)
    nbits = 8 * sizeof(n) - leading_zeros(n)  # number of significant bits
    pad = k - nbits
    for pos in 1:nbits
        bv[pad + pos] = isodd(n >> (nbits - pos))
    end
    return bv
end
# Split a graph6 byte vector into the vertex count and the remaining payload.
# Small graphs (first byte < 0x7e) encode n in one byte (offset 63); otherwise
# n follows one or two 0x7e markers, in 3 or 6 payload bytes respectively.
# NOTE(review): `_bv2int` is defined elsewhere in this project, not in this file.
function _g6_Np(N::Vector{UInt8})
    if N[1] < 0x7e return (Int(N[1] - 63), N[2:end])
    elseif N[2] < 0x7e return (_bv2int(_g6_Rp(N[2:4])), N[5:end])
    else return(_bv2int(_g6_Rp(N[3:8])), N[9:end])
    end
end
# Read `filename` line by line, decoding each line as a graph6 record, and
# return the graphs as a vector of `SimpleGraph{Int}`.
function load_g6(filename)
    graphs = SimpleGraph{Int}[]
    open(filename) do io
        for line in eachline(io)
            n, ixs = _g6StringToGraph(line)
            push!(graphs, ixs2graph(n, ixs))
        end
    end
    return graphs
end
# Build a `SimpleGraph` with `n` vertices from a collection of `(src, dst)`
# index pairs.
function ixs2graph(n::Int, ixs)
    graph = SimpleGraph(n)
    foreach(e -> add_edge!(graph, e[1], e[2]), ixs)
    return graph
end
|
[
"Apache-2.0"
] | 0.4.0 | 0736fd885e445c13b7b58e31d1ccc37a4950f7df | code | 4129 | const SHOW_WEIGHT = Ref(false)
# The static one for unweighted cells: a singleton stand-in for a weight of 1,
# so unweighted grids carry no runtime weight data. Displays as "1".
struct ONE end
Base.one(::Type{ONE}) = ONE()
Base.show(io::IO, ::ONE) = print(io, "1")
Base.show(io::IO, ::MIME"text/plain", ::ONE) = print(io, "1")
############################ Cell ############################
# Cell does not have coordinates.
# `WT` is the weight type: `ONE` for unweighted cells, a `Real` otherwise.
abstract type AbstractCell{WT} end
# display honors the global `SHOW_WEIGHT` toggle
Base.show(io::IO, x::AbstractCell) = print_cell(io, x; show_weight=SHOW_WEIGHT[])
Base.show(io::IO, ::MIME"text/plain", cl::AbstractCell) = Base.show(io, cl)
# SimpleCell
"""
    SimpleCell{WT}

A grid cell holding an `occupied` flag and a `weight` of type `WT`.
Unoccupied cells act as the identity element of cell addition.
"""
struct SimpleCell{WT} <: AbstractCell{WT}
    occupied::Bool
    weight::WT
    SimpleCell(; occupied=true) = new{ONE}(occupied, ONE())
    SimpleCell(x::Union{Real,ONE}; occupied=true) = new{typeof(x)}(occupied, x)
    SimpleCell{T}(x::Real; occupied=true) where T = new{T}(occupied, T(x))
end
get_weight(sc::SimpleCell) = sc.weight
# An "empty" cell keeps a unit weight but is flagged unoccupied.
Base.empty(::Type{SimpleCell{WT}}) where WT = SimpleCell(one(WT); occupied=false)
Base.isempty(sc::SimpleCell) = !sc.occupied
# Print "●" (or the weight when `show_weight` is set) for occupied cells,
# and "⋅" for unoccupied ones.
function print_cell(io::IO, x::AbstractCell; show_weight=false)
    if x.occupied
        print(io, show_weight ? "$(get_weight(x))" : "●")
    else
        print(io, "⋅")
    end
end
# Cell arithmetic: unoccupied cells are absorbed; occupied cells combine
# their weights.
Base.:+(a::SimpleCell{T}, b::SimpleCell{T}) where T<:Real = a.occupied ? (b.occupied ? SimpleCell(a.weight + b.weight) : a) : b
Base.:-(a::SimpleCell{T}, b::SimpleCell{T}) where T<:Real = a.occupied ? (b.occupied ? SimpleCell(a.weight - b.weight) : a) : -b
Base.:-(b::SimpleCell{T}) where T<:Real = b.occupied ? SimpleCell(-b.weight) : b
Base.zero(::Type{SimpleCell{T}}) where T = SimpleCell(one(T); occupied=false)
WeightedSimpleCell{T<:Real} = SimpleCell{T}
UnWeightedSimpleCell = SimpleCell{ONE}
############################ Node ############################
# The node used in unweighted graph
"""
    Node{WT}

A grid-graph vertex at `loc = (row, col)` with a weight of type `WT`
(`ONE` for unweighted nodes). Iteration, `length` and `getindex` delegate to
the location tuple, so a `Node` can be destructured like `(x, y)`.
"""
struct Node{WT}
    loc::Tuple{Int,Int}
    weight::WT
end
Node(x::Real, y::Real) = Node((Int(x), Int(y)), ONE())
Node(x::Real, y::Real, w::Real) = Node((Int(x), Int(y)), w)
Node(xy::Vector{Int}) = Node(xy...)
Node(xy::Tuple{Int,Int}) = Node(xy, ONE())
getxy(p::Node) = p.loc
chxy(n::Node, loc) = Node(loc, n.weight)  # same weight, new location
Base.iterate(p::Node, i) = Base.iterate(p.loc, i)
Base.iterate(p::Node) = Base.iterate(p.loc)
Base.length(p::Node) = 2
Base.getindex(p::Node, i::Int) = p.loc[i]
offset(p::Node, xy) = chxy(p, getxy(p) .+ xy)  # translate the node by `xy`
const WeightedNode{T<:Real} = Node{T}
const UnWeightedNode = Node{ONE}
############################ GridGraph ############################
# GridGraph
"""
    GridGraph{NT<:Node}

A graph whose vertices are `nodes` placed on a `size = (nrow, ncol)` grid;
edges are implied by the unit-disk rule with connection `radius`.
"""
struct GridGraph{NT<:Node}
    size::Tuple{Int,Int}
    nodes::Vector{NT}
    radius::Float64
end
function Base.show(io::IO, grid::GridGraph)
    println(io, "$(typeof(grid)) (radius = $(grid.radius))")
    print_grid(io, grid; show_weight=SHOW_WEIGHT[])
end
Base.size(gg::GridGraph) = gg.size
Base.size(gg::GridGraph, i::Int) = gg.size[i]
# Return the unit-disk `SimpleGraph` together with the vector of node weights.
function graph_and_weights(grid::GridGraph)
    return unit_disk_graph(getfield.(grid.nodes, :loc), grid.radius), getfield.(grid.nodes, :weight)
end
# Unweighted grid graphs convert directly to a `SimpleGraph`.
function Graphs.SimpleGraph(grid::GridGraph{Node{ONE}})
    return unit_disk_graph(getfield.(grid.nodes, :loc), grid.radius)
end
coordinates(grid::GridGraph) = getfield.(grid.nodes, :loc)
# printing function for Grid graphs
function print_grid(io::IO, grid::GridGraph{Node{WT}}; show_weight=false) where WT
    print_grid(io, cell_matrix(grid); show_weight)
end
function print_grid(io::IO, content::AbstractMatrix; show_weight=false)
    for i=1:size(content, 1)
        for j=1:size(content, 2)
            print_cell(io, content[i,j]; show_weight)
            print(io, " ")
        end
        if i!=size(content, 1)  # no trailing newline after the last row
            println(io)
        end
    end
end
# Convert a grid graph to its dense cell-matrix representation.
function cell_matrix(gg::GridGraph{Node{WT}}) where WT
    mat = fill(empty(SimpleCell{WT}), gg.size)
    for node in gg.nodes
        mat[node.loc...] = SimpleCell(node.weight)
    end
    return mat
end
# Build a grid graph from a cell matrix, collecting occupied cells in
# column-major order.
function GridGraph(m::AbstractMatrix{SimpleCell{WT}}, radius::Real) where WT
    nodes = Node{WT}[]
    for j=1:size(m, 2)
        for i=1:size(m, 1)
            if !isempty(m[i, j])
                push!(nodes, Node((i,j), m[i,j].weight))
            end
        end
    end
    return GridGraph(size(m), nodes, radius)
end
|
[
"Apache-2.0"
] | 0.4.0 | 0736fd885e445c13b7b58e31d1ccc37a4950f7df | code | 1635 | # Copyright 2021 QuEra Computing Inc. All rights reserved.
"""
UnitDiskMapping reduces graph problems — maximum (weighted) independent set,
QUBO, integer factoring and logic gates — to problems on unit-disk grid graphs
such as the King's subgraph, and maps solutions back.
"""
module UnitDiskMapping
using Graphs
using LuxorGraphPlot
using LuxorGraphPlot.Luxor.Colors
# Basic types
export UnWeighted, Weighted
export Cell, AbstractCell, SimpleCell
export Node, WeightedNode, UnWeightedNode
export graph_and_weights, GridGraph, coordinates
# dragon drop methods
export map_factoring, map_qubo, map_qubo_square, map_simple_wmis, solve_factoring, multiplier
export QUBOResult, WMISResult, SquareQUBOResult, FactoringResult
# logic gates
export Gate, gate_gadget
# plotting methods
export show_grayscale, show_pins, show_config
# path-width optimized mapping
export MappingResult, map_graph, map_config_back, map_weights, trace_centers, print_config
export MappingGrid, embed_graph, apply_crossing_gadgets!, apply_simplifier_gadgets!, unapply_gadgets!
# gadgets
export Pattern, Corner, Turn, Cross, TruncatedTurn, EndTurn,
    Branch, TrivialTurn, BranchFix, WTurn, TCon, BranchFixB,
    RotatedGadget, ReflectedGadget, rotated_and_reflected, WeightedGadget
export vertex_overhead, source_graph, mapped_graph, mis_overhead
export @gg
# utils
export is_independent_set, unitdisk_graph
# path decomposition
export pathwidth, PathDecompositionMethod, MinhThiTrick, Greedy
# `Branching` was renamed to `MinhThiTrick`
@deprecate Branching MinhThiTrick
include("utils.jl")
include("Core.jl")
include("pathdecomposition/pathdecomposition.jl")
include("copyline.jl")
include("dragondrop.jl")
include("multiplier.jl")
include("logicgates.jl")
include("gadgets.jl")
include("mapping.jl")
include("weighted.jl")
include("simplifiers.jl")
include("extracting_results.jl")
include("visualize.jl")
end
| UnitDiskMapping | https://github.com/QuEraComputing/UnitDiskMapping.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 0736fd885e445c13b7b58e31d1ccc37a4950f7df | code | 4815 | # vslot
# ↓
# | ← vstart
# |
# |------- ← hslot
# | ↑ ← vstop
# hstop
# An L-shaped "copy line" for one original vertex: a vertical segment in
# column `vslot` spanning rows `vstart:vstop`, joined to a horizontal segment
# in row `hslot` spanning columns `vslot:hstop` (see the diagram above).
struct CopyLine
    vertex::Int # original graph vertex this line represents
    vslot::Int  # column of the vertical segment
    hslot::Int  # row of the horizontal segment
    vstart::Int # first row of the vertical segment
    vstop::Int  # last row of the vertical segment
    hstop::Int # there is no hstart
end
function Base.show(io::IO, cl::CopyLine)
    print(io, "$(typeof(cl)) $(cl.vertex): vslot → [$(cl.vstart):$(cl.vstop),$(cl.vslot)], hslot → [$(cl.hslot),$(cl.vslot):$(cl.hstop)]")
end
Base.show(io::IO, ::MIME"text/plain", cl::CopyLine) = Base.show(io, cl)
# create copy lines using path decomposition
# `g` is the graph,
# `ordered_vertices` is a vector of vertices.
# Build one `CopyLine` per vertex of `g`, processing vertices in
# `ordered_vertices` order. Vertical slots are assigned greedily: each vertex
# takes the first free slot; `remove_order` presumably yields, for each step,
# the vertices whose slots can be freed afterwards — TODO confirm against its
# definition in pathdecomposition.
function create_copylines(g::SimpleGraph, ordered_vertices::AbstractVector{Int})
    slots = zeros(Int, nv(g))   # slots[k] = vertex currently occupying slot k (0 = free)
    hslots = zeros(Int, nv(g))
    rmorder = remove_order(g, ordered_vertices)
    # assign hslots
    for (i, (v, rs)) in enumerate(zip(ordered_vertices, rmorder))
        # update slots
        islot = findfirst(iszero, slots)
        slots[islot] = v
        hslots[i] = islot
        for r in rs
            slots[findfirst(==(r), slots)] = 0
        end
    end
    vstarts = zeros(Int, nv(g))
    vstops = zeros(Int, nv(g))
    hstops = zeros(Int, nv(g))
    # extents: each line must reach the hslots/vslots of all neighbors of its
    # vertex (and its own), so crossings exist wherever an edge must be encoded
    for (i, v) in enumerate(ordered_vertices)
        relevant_hslots = [hslots[j] for j=1:i if has_edge(g, ordered_vertices[j], v) || v == ordered_vertices[j]]
        relevant_vslots = [i for i=1:nv(g) if has_edge(g, ordered_vertices[i], v) || v == ordered_vertices[i]]
        vstarts[i] = minimum(relevant_hslots)
        vstops[i] = maximum(relevant_hslots)
        hstops[i] = maximum(relevant_vslots)
    end
    return [CopyLine(ordered_vertices[i], i, hslots[i], vstarts[i], vstops[i], hstops[i]) for i=1:nv(g)]
end
# -1 means no line
# One cell of the crossing lattice: which copy-line vertex enters from each of
# the four sides (-1 = none), and whether the crossing lines are connected.
struct Block
    top::Int
    bottom::Int
    left::Int
    right::Int
    connected::Int # -1 for not exist, 0 for not, 1 for yes.
end
Base.show(io::IO, ::MIME"text/plain", block::Block) = Base.show(io, block)
function Base.show(io::IO, block::Block)
    print(io, "$(get_row_string(block, 1))\n$(get_row_string(block, 2))\n$(get_row_string(block, 3))")
end
# Render row `i` (1..3) of a block's 3-line text representation; vertices are
# shown base-36 style ('0'-'9' then 'a'...), -1 as '⋅'. Returns `nothing` for
# any other `i`.
function get_row_string(block::Block, i)
    _s(x::Int) = x == -1 ? '⋅' : (x < 10 ? '0'+x : 'a'+(x-10))
    if i == 1
        return " ⋅ $(_s(block.top)) ⋅"
    elseif i==2
        return " $(_s(block.left)) $(block.connected == -1 ? '⋅' : (block.connected == 1 ? '●' : '○')) $(_s(block.right))"
    elseif i==3
        return " ⋅ $(_s(block.bottom)) ⋅"
    end
end
# Construct the crossing lattice of graph `g` for the given vertex order:
# its extent is the bounding box of all copy lines.
function crossing_lattice(g, ordered_vertices)
    copylines = create_copylines(g, ordered_vertices)
    rowmin = minimum(l -> l.vstart, copylines)
    rowmax = maximum(l -> l.vstop, copylines)
    colmin = minimum(l -> l.vslot, copylines)
    colmax = maximum(l -> l.hstop, copylines)
    return CrossingLattice(colmax - colmin + 1, rowmax - rowmin + 1, copylines, g)
end
# A lazy `height × width` matrix of `Block`s describing where copy lines run
# and cross; blocks are computed on demand in `getindex`.
struct CrossingLattice <: AbstractArray{Block, 2}
    width::Int
    height::Int
    lines::Vector{CopyLine}
    graph::SimpleGraph{Int}
end
# NOTE: rows come first, so valid indices are (1:height, 1:width).
Base.size(lattice::CrossingLattice) = (lattice.height, lattice.width)
# Compute the `Block` at row `i`, column `j`: which copy lines pass through
# this cell from each side, and whether the crossing lines share an edge of
# the original graph.
function Base.getindex(d::CrossingLattice, i::Int, j::Int)
    # `size(d) == (height, width)`, so `i` is a row and `j` a column.
    # BUG FIX: the original condition was `!(1<=i<=d.width || 1<=j<=d.height)`,
    # which (a) swapped width/height and (b) used `||`, so it only raised a
    # BoundsError when *both* indices were out of range.
    if !(1 <= i <= d.height && 1 <= j <= d.width)
        throw(BoundsError(d, (i, j)))
    end
    left = right = top = bottom = -1
    for line in d.lines
        # vertical slot
        if line.vslot == j
            if line.vstart == line.vstop == i # a row
            elseif line.vstart == i # starting
                @assert bottom == -1
                bottom = line.vertex
            elseif line.vstop == i # stopping
                @assert top == -1
                top = line.vertex
            elseif line.vstart < i < line.vstop # middle
                @assert top == -1
                @assert bottom == -1
                top = bottom = line.vertex
            end
        end
        # horizontal slot
        if line.hslot == i
            if line.vslot == line.hstop == j # a col
            elseif line.vslot == j
                @assert right == -1
                right = line.vertex
            elseif line.hstop == j
                @assert left == -1
                left = line.vertex
            elseif line.vslot < j < line.hstop
                @assert left == -1
                @assert right == -1
                left = right = line.vertex
            end
        end
    end
    # the horizontal and vertical lines (if any) passing through this cell
    h = left == -1 ? right : left
    v = top == -1 ? bottom : top
    return Block(top, bottom, left, right, (v == -1 || h == -1) ? -1 : has_edge(d.graph, h, v))
end
Base.show(io::IO, ::MIME"text/plain", d::CrossingLattice) = Base.show(io, d)
function Base.show(io::IO, d::CrossingLattice)
for i=1:d.height
for k=1:3
for j=1:d.width
print(io, get_row_string(d[i,j], k), " ")
end
i == d.height && k==3 || println()
end
i == d.height || println()
end
end
| UnitDiskMapping | https://github.com/QuEraComputing/UnitDiskMapping.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 0736fd885e445c13b7b58e31d1ccc37a4950f7df | code | 11254 | # Glue multiple blocks into a whole
# `DI` and `DJ` are the overlap in row and columns between two adjacent blocks.
# Glue a grid of cell-matrix chunks into one big cell matrix. Adjacent chunks
# overlap by `DI` rows / `DJ` columns (negative values insert gaps);
# overlapping cells are combined with `+`.
function glue(grid::AbstractMatrix{<:AbstractMatrix{SimpleCell{T}}}, DI::Int, DJ::Int) where T
    @assert size(grid, 1) > 0 && size(grid, 2) > 0
    # total extent: each chunk contributes its size minus the shared overlap
    nrow = sum(x->size(x, 1)-DI, grid[:,1]) + DI
    ncol = sum(x->size(x, 2)-DJ, grid[1,:]) + DJ
    res = zeros(SimpleCell{T}, nrow, ncol)
    ioffset = 0
    for i=1:size(grid, 1)
        joffset = 0
        for j=1:size(grid, 2)
            chunk = grid[i, j]
            res[ioffset+1:ioffset+size(chunk, 1), joffset+1:joffset+size(chunk, 2)] .+= chunk
            joffset += size(chunk, 2) - DJ
            # advance the row offset once per grid row, after its last column
            j == size(grid, 2) && (ioffset += size(chunk, 1)-DI)
        end
    end
    return res
end
"""
map_qubo(J::AbstractMatrix, h::AbstractVector) -> QUBOResult
Map a QUBO problem to a weighted MIS problem on a defected King's graph, where a QUBO problem is defined by the following Hamiltonian
```math
E(z) = -\\sum_{i<j} J_{ij} z_i z_j + \\sum_i h_i z_i
```
!!! note
The input coupling strength and onsite energies must be << 1.
A QUBO gadget is
```
⋅ ⋅ ● ⋅
● A B ⋅
⋅ C D ●
⋅ ● ⋅ ⋅
```
where `A`, `B`, `C` and `D` are weights of nodes that defined as
```math
\\begin{align}
A = -J_{ij} + 4\\\\
B = J_{ij} + 4\\\\
C = J_{ij} + 4\\\\
D = -J_{ij} + 4
\\end{align}
```
The rest nodes: `●` have weights 2 (boundary nodes have weights ``1 - h_i``).
"""
function map_qubo(J::AbstractMatrix{T1}, h::AbstractVector{T2}) where {T1, T2}
    T = promote_type(T1, T2)
    n = length(h)
    @assert size(J) == (n, n) "The size of coupling matrix `J`: $(size(J)) not consistent with size of onsite term `h`: $(size(h))"
    # build the crossing lattice of a complete graph (every pair crosses),
    # then swap in an edgeless graph so every crossing uses the QUBO gadget
    d = crossing_lattice(complete_graph(n), 1:n)
    d = CrossingLattice(d.width, d.height, d.lines, SimpleGraph(n))
    chunks = render_grid(T, d)
    # add coupling: shift the 2×2 gadget core by ±J[i,j] (A/B/C/D in the docstring)
    for i=1:n-1
        for j=i+1:n
            a = J[i,j]
            chunks[i, j][2:3, 2:3] .+= SimpleCell.([-a a; a -a])
        end
    end
    grid = glue(chunks, 0, 0)
    # add one extra row
    # make the grid larger by one unit
    gg, pins = post_process_grid(grid, h, -h)
    mis_overhead = (n - 1) * n * 4 + n - 4
    return QUBOResult(gg, pins, mis_overhead)
end
"""
map_qubo_restricted(coupling::AbstractVector) -> RestrictedQUBOResult
Map a nearest-neighbor restricted QUBO problem to a weighted MIS problem on a grid graph,
where the QUBO problem can be specified by a vector of `(i, j, i', j', J)`.
```math
E(z) = -\\sum_{(i,j)\\in E} J_{ij} z_i z_j
```
A FM gadget is
```
- ⋅ + ⋅ ⋅ ⋅ + ⋅ -
⋅ ⋅ ⋅ ⋅ 4 ⋅ ⋅ ⋅ ⋅
+ ⋅ - ⋅ ⋅ ⋅ - ⋅ +
```
where `+`, `-` and `4` are weights of nodes `+J`, `-J` and `4J`.
```
- ⋅ + ⋅ ⋅ ⋅ + ⋅ -
⋅ ⋅ ⋅ 4 ⋅ 4 ⋅ ⋅ ⋅
+ ⋅ - ⋅ ⋅ ⋅ - ⋅ +
```
"""
function map_qubo_restricted(coupling::AbstractVector{Tuple{Int,Int,Int,Int,T}}) where {T}
    # lattice extent = largest row/column index mentioned in any coupling
    m, n = max(maximum(x->x[1], coupling), maximum(x->x[3], coupling)), max(maximum(x->x[2], coupling), maximum(x->x[4], coupling))
    # horizontal (3×9) and vertical (9×3, rotated) gadget canvases per bond
    hchunks = [zeros(SimpleCell{T}, 3, 9) for i=1:m, j=1:n-1]
    vchunks = [zeros(SimpleCell{T}, 9, 3) for i=1:m-1, j=1:n]
    # add coupling
    for (i, j, i2, j2, J) in coupling
        # only nearest-neighbor bonds (right or down) are supported
        @assert (i2, j2) == (i, j+1) || (i2, j2) == (i+1, j)
        if (i2, j2) == (i, j+1)
            hchunks[i, j] .+= cell_matrix(gadget_qubo_restricted(J))
        else
            vchunks[i, j] .+= rotr90(cell_matrix(gadget_qubo_restricted(J)))
        end
    end
    grid = glue(hchunks, -3, 3) .+ glue(vchunks, 3, -3)
    return RestrictedQUBOResult(GridGraph(grid, 2.01*sqrt(2)))
end
# The ferromagnetic/antiferromagnetic bond gadget of `map_qubo_restricted`:
# corner weights ∓|J|, inner weights ±|J|, and one (J>0) or two (J<0) center
# nodes of weight 4|J| (see the docstring diagrams).
function gadget_qubo_restricted(J::T) where T
    a = abs(J)
    return GridGraph((3, 9),
            [
                Node((1,1), -a),
                Node((3,1), -a),
                Node((1,9), -a),
                Node((3,9), -a),
                Node((1,3), a),
                Node((3,3), a),
                Node((1,7), a),
                Node((3,7), a),
                (J > 0 ? [Node((2,5), 4a)] : [Node((2,4), 4a), Node((2,6), 4a)])...
            ], 2.01*sqrt(2))
end
"""
map_qubo_square(coupling::AbstractVector, onsite::AbstractVector) -> SquareQUBOResult
Map a QUBO problem on square lattice to a weighted MIS problem on a grid graph,
where the QUBO problem can be specified by
* a vector coupling of `(i, j, i', j', J)`, s.t. (i', j') == (i, j+1) or (i', j') = (i+1, j).
* a vector of onsite term `(i, j, h)`.
```math
E(z) = -\\sum_{(i,j)\\in E} J_{ij} z_i z_j + h_i z_i
```
The gadget for suqare lattice QUBO problem is as follows
```
⋅ ⋅ ⋅ ⋅ ● ⋅ ⋅ ⋅ ⋅
○ ⋅ ● ⋅ ⋅ ⋅ ● ⋅ ○
⋅ ⋅ ⋅ ● ⋅ ● ⋅ ⋅ ⋅
⋅ ⋅ ⋅ ⋅ ○ ⋅ ⋅ ⋅ ⋅
```
where white circles have weight 1 and black circles have weight 2. The unit distance is `2.3`.
"""
function map_qubo_square(coupling::AbstractVector{Tuple{Int,Int,Int,Int,T1}}, onsite::AbstractVector{Tuple{Int,Int,T2}}) where {T1,T2}
    T = promote_type(T1, T2)
    # lattice extent = largest row/column index mentioned in any coupling
    m, n = max(maximum(x->x[1], coupling), maximum(x->x[3], coupling)), max(maximum(x->x[2], coupling), maximum(x->x[4], coupling))
    hchunks = [zeros(SimpleCell{T}, 4, 9) for i=1:m, j=1:n-1]
    vchunks = [zeros(SimpleCell{T}, 9, 4) for i=1:m-1, j=1:n]
    # add coupling
    sumJ = zero(T)
    for (i, j, i2, j2, J) in coupling
        # only nearest-neighbor bonds (right or down) are supported
        @assert (i2, j2) == (i, j+1) || (i2, j2) == (i+1, j)
        if (i2, j2) == (i, j+1)
            hchunks[i, j] .+= cell_matrix(gadget_qubo_square(T))
            # coupling strength enters through the bottom pin weight
            hchunks[i, j][4, 5] -= SimpleCell(T(2J))
        else
            vchunks[i, j] .+= rotr90(cell_matrix(gadget_qubo_square(T)))
            vchunks[i, j][5, 1] -= SimpleCell(T(2J))
        end
        sumJ += J
    end
    # right shift by 2
    grid = glue(hchunks, -4, 1)
    grid = pad(grid; left=2, right=1)
    # down shift by 1
    grid2 = glue(vchunks, 1, -4)
    grid2 = pad(grid2; top=1, bottom=2)
    grid .+= grid2
    # add onsite terms
    sumh = zero(T)
    for (i, j, h) in onsite
        grid[(i-1)*8+2, (j-1)*8+3] -= SimpleCell(T(2h))
        sumh += h
    end
    # energy offset between the QUBO objective and the mapped MIS objective
    overhead = 5 * length(coupling) - sumJ - sumh
    gg = GridGraph(grid, 2.3)
    pins = Int[]
    # pins are the onsite-term nodes, in the order `onsite` was given
    for (i, j, h) in onsite
        push!(pins, findfirst(n->n.loc == ((i-1)*8+2, (j-1)*8+3), gg.nodes))
    end
    return SquareQUBOResult(gg, pins, overhead)
end
# Pad matrix `m` with the requested number of empty rows/columns on each side.
function pad(m::AbstractMatrix{T}; top::Int=0, bottom::Int=0, left::Int=0, right::Int=0) where T
    # gluing with a negative overlap against a zero-size matrix inserts
    # exactly `top` (etc.) empty rows/columns
    top != 0 && (m = vglue([zeros(T, 0, size(m, 2)), m], -top))
    bottom != 0 && (m = vglue([m, zeros(T, 0, size(m, 2))], -bottom))
    left != 0 && (m = hglue([zeros(T, size(m, 1), 0), m], -left))
    right != 0 && (m = hglue([m, zeros(T, size(m, 1), 0)], -right))
    return m
end
# Stack matrices vertically / horizontally with row/column overlap `i` / `j`
# (negative values insert gaps); thin wrappers around `glue`.
vglue(mats, i::Int) = glue(reshape(mats, :, 1), i, 0)
hglue(mats, j::Int) = glue(reshape(mats, 1, :), 0, j)
# The square-lattice QUBO bond gadget (see the `map_qubo_square` docstring
# diagram): weight-1 nodes at the pins, weight-2 nodes in the core; the node
# layout is offset by (di, dj) inside a 4×9 bounding box.
function gadget_qubo_square(::Type{T}) where T
    di, dj = 1, 2
    w1 = T(1)
    w2 = T(2)
    nodes = [
        Node((1 + di, 1), w1),
        Node((1 + di, 1 + dj), w2),
        Node((di, 3 + dj), w2),
        Node((1 + di, 5 + dj), w2),
        Node((1 + di, 5 + 2 * dj), w1),
        Node((2 + di, 2 + dj), w2),
        Node((2 + di, 4 + dj), w2),
        Node((3 + di, 3 + dj), w1),
    ]
    return GridGraph((4, 9), nodes, 2.3)
end
"""
map_simple_wmis(graph::SimpleGraph, weights::AbstractVector) -> WMISResult
Map a weighted MIS problem to a weighted MIS problem on a defected King's graph.
!!! note
The input coupling strength and onsite energies must be << 1.
This method does not provide path decomposition based optimization, check [`map_graph`](@ref) for the path decomposition optimized version.
"""
function map_simple_wmis(graph::SimpleGraph, weights::AbstractVector{T}) where {T}
    n = length(weights)
    @assert nv(graph) == n
    # crossing lattice of the complete graph (every pair crosses), but with
    # the *actual* graph deciding which crossings carry an edge gadget
    d = crossing_lattice(complete_graph(n), 1:n)
    d = CrossingLattice(d.width, d.height, d.lines, graph)
    chunks = render_grid(T, d)
    grid = glue(chunks, 0, 0)
    # add one extra row
    # make the grid larger by one unit
    gg, pins = post_process_grid(grid, weights, zeros(T, length(weights)))
    mis_overhead = (n - 1) * n * 4 + n - 4 - 2*ne(graph)
    return WMISResult(gg, pins, mis_overhead)
end
# Render each block of the crossing lattice into a 4×4 chunk of weighted
# cells: crossings get the 8-node (edge) or 2×2-core (no-edge) gadget,
# L-turns get two weight-2 nodes, empty blocks stay empty.
function render_grid(::Type{T}, cl::CrossingLattice) where T
    n = nv(cl.graph)
    z = empty(SimpleCell{T})
    one = SimpleCell(T(1))
    two = SimpleCell(T(2))
    four = SimpleCell(T(4))
    # replace chunks
    # for pure crossing, they are
    # ●
    # ● ● ●
    # ● ● ●
    #   ●
    # for crossing with edge, they are
    #   ●
    # ● ● ●
    #   ● ●
    #   ●
    return map(zip(CartesianIndices(cl), cl)) do (ci, block)
        if block.bottom != -1 && block.left != -1
            # NOTE: for border vertices, we set them to weight 1.
            if has_edge(cl.graph, ci.I...)
                [z (block.top == -1 ? one : two) z z;
                (ci.I[2]==2 ? one : two) two two z;
                z two z (block.right == -1 ? one : two);
                z (ci.I[1] == n-1 ? one : two) z z]
            else
                [z z (block.top == -1 ? one : two) z;
                (ci.I[2]==2 ? one : two) four four z;
                z four four (block.right == -1 ? one : two);
                z (ci.I[1] == n-1 ? one : two) z z]
            end
        elseif block.top != -1 && block.right != -1 # the L turn
            m = fill(z, 4, 4)
            m[1, 3] = m[2, 4] = two
            m
        else
            # do nothing
            fill(z, 4, 4)
        end
    end
end
# h0 and h1 are offset of energy for 0 state and 1 state.
function post_process_grid(grid::Matrix{SimpleCell{T}}, h0, h1) where T
n = length(h0)
mat = grid[1:end-4, 5:end]
mat[2, 1] += SimpleCell{T}(h0[1]) # top left
mat[size(mat, 1),size(mat, 2)-2] += SimpleCell{T}(h1[end]) # bottom right
for j=1:length(h0)-1
# top side
offset = mat[1, j*4-1].occupied ? 1 : 2
@assert mat[1, j*4-offset].occupied
mat[1, j*4-offset] += SimpleCell{T}(h0[1+j])
# right side
offset = mat[j*4-1,size(mat,2)].occupied ? 1 : 2
@assert mat[j*4-offset,size(mat,2)].occupied
mat[j*4-offset,size(mat, 2)] += SimpleCell{T}(h1[j])
end
# generate GridGraph from matrix
locs = [Node(ci.I, mat[ci].weight) for ci in findall(x->x.occupied, mat)]
gg = GridGraph(size(mat), locs, 1.5)
# find pins
pins = [findfirst(x->x.loc == (2, 1), locs)]
for i=1:n-1
push!(pins, findfirst(x->x.loc == (1, i*4-1) || x.loc == (1,i*4-2), locs))
end
return gg, pins
end
# Result of `map_qubo`: the mapped grid graph, the pin node indices (one per
# QUBO variable) and the MIS-size overhead of the mapping.
struct QUBOResult{NT}
    grid_graph::GridGraph{NT}
    pins::Vector{Int}
    mis_overhead::Int
end
# QUBO spins are read back inverted: pin occupied ⇒ variable 0.
function map_config_back(res::QUBOResult, cfg)
    return 1 .- cfg[res.pins]
end
# Result of `map_simple_wmis`.
struct WMISResult{NT}
    grid_graph::GridGraph{NT}
    pins::Vector{Int}
    mis_overhead::Int
end
function map_config_back(res::WMISResult, cfg)
    return cfg[res.pins]
end
# Result of `map_qubo_restricted`.
struct RestrictedQUBOResult{NT}
    grid_graph::GridGraph{NT}
end
# NOTE(review): intentionally returns `nothing` — the back-mapping for the
# restricted QUBO is not implemented yet; confirm before relying on it.
function map_config_back(res::RestrictedQUBOResult, cfg)
end
# Result of `map_qubo_square`.
struct SquareQUBOResult{NT}
    grid_graph::GridGraph{NT}
    pins::Vector{Int}
    mis_overhead::Float64
end
function map_config_back(res::SquareQUBOResult, cfg)
    return cfg[res.pins]
end
| UnitDiskMapping | https://github.com/QuEraComputing/UnitDiskMapping.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 0736fd885e445c13b7b58e31d1ccc37a4950f7df | code | 5587 | # Do not modify this file, because it is automatically generated by `project/createmap.jl`
function mapped_entry_to_compact(::Cross{false})
return Dict([5 => 4, 12 => 4, 8 => 0, 1 => 0, 0 => 0, 6 => 0, 11 => 11, 9 => 9, 14 => 2, 3 => 2, 7 => 2, 4 => 4, 13 => 13, 15 => 11, 2 => 2, 10 => 2])
end
function source_entry_to_configs(::Cross{false})
return Dict(Pair{Int64, Vector{BitVector}}[5 => [[1, 0, 1, 0, 0, 0, 1, 0, 1], [1, 0, 0, 1, 0, 0, 1, 0, 1]], 12 => [[0, 0, 1, 0, 1, 0, 1, 0, 1], [0, 1, 0, 0, 1, 0, 1, 0, 1]], 8 => [[0, 0, 1, 0, 1, 0, 0, 1, 0], [0, 1, 0, 0, 1, 0, 0, 1, 0], [0, 0, 1, 0, 1, 0, 1, 0, 0], [0, 1, 0, 0, 1, 0, 1, 0, 0]], 1 => [[1, 0, 1, 0, 0, 0, 0, 1, 0], [1, 0, 0, 1, 0, 0, 0, 1, 0], [1, 0, 1, 0, 0, 0, 1, 0, 0], [1, 0, 0, 1, 0, 0, 1, 0, 0]], 0 => [[0, 1, 0, 1, 0, 0, 0, 1, 0], [0, 1, 0, 1, 0, 0, 1, 0, 0]], 6 => [[0, 1, 0, 1, 0, 1, 0, 0, 1]], 11 => [[1, 0, 1, 0, 1, 1, 0, 1, 0]], 9 => [[1, 0, 1, 0, 1, 0, 0, 1, 0], [1, 0, 1, 0, 1, 0, 1, 0, 0]], 14 => [[0, 0, 1, 0, 1, 1, 0, 0, 1], [0, 1, 0, 0, 1, 1, 0, 0, 1]], 3 => [[1, 0, 1, 0, 0, 1, 0, 1, 0], [1, 0, 0, 1, 0, 1, 0, 1, 0]], 7 => [[1, 0, 1, 0, 0, 1, 0, 0, 1], [1, 0, 0, 1, 0, 1, 0, 0, 1]], 4 => [[0, 1, 0, 1, 0, 0, 1, 0, 1]], 13 => [[1, 0, 1, 0, 1, 0, 1, 0, 1]], 15 => [[1, 0, 1, 0, 1, 1, 0, 0, 1]], 2 => [[0, 1, 0, 1, 0, 1, 0, 1, 0]], 10 => [[0, 0, 1, 0, 1, 1, 0, 1, 0], [0, 1, 0, 0, 1, 1, 0, 1, 0]]])
end
mis_overhead(::Cross{false}) = -1
function mapped_entry_to_compact(::Cross{true})
return Dict([5 => 5, 12 => 12, 8 => 0, 1 => 0, 0 => 0, 6 => 6, 11 => 11, 9 => 9, 14 => 14, 3 => 3, 7 => 7, 4 => 0, 13 => 13, 15 => 15, 2 => 0, 10 => 10])
end
function source_entry_to_configs(::Cross{true})
return Dict(Pair{Int64, Vector{BitVector}}[5 => [], 12 => [[0, 0, 1, 0, 0, 1]], 8 => [[0, 0, 1, 0, 1, 0]], 1 => [[1, 0, 0, 0, 1, 0]], 0 => [[0, 1, 0, 0, 1, 0]], 6 => [[0, 1, 0, 1, 0, 1]], 11 => [[1, 0, 1, 1, 0, 0]], 9 => [[1, 0, 1, 0, 1, 0]], 14 => [[0, 0, 1, 1, 0, 1]], 3 => [[1, 0, 0, 1, 0, 0]], 7 => [], 4 => [[0, 1, 0, 0, 0, 1]], 13 => [], 15 => [], 2 => [[0, 1, 0, 1, 0, 0]], 10 => [[0, 0, 1, 1, 0, 0]]])
end
mis_overhead(::Cross{true}) = -1
function mapped_entry_to_compact(::Turn)
return Dict([0 => 0, 2 => 0, 3 => 3, 1 => 0])
end
function source_entry_to_configs(::Turn)
return Dict(Pair{Int64, Vector{BitVector}}[0 => [[0, 1, 0, 1, 0]], 2 => [[0, 1, 0, 0, 1], [0, 0, 1, 0, 1]], 3 => [[1, 0, 1, 0, 1]], 1 => [[1, 0, 1, 0, 0], [1, 0, 0, 1, 0]]])
end
mis_overhead(::Turn) = -1
function mapped_entry_to_compact(::WTurn)
return Dict([0 => 0, 2 => 0, 3 => 3, 1 => 0])
end
function source_entry_to_configs(::WTurn)
return Dict(Pair{Int64, Vector{BitVector}}[0 => [[1, 0, 1, 0, 0]], 2 => [[0, 0, 0, 1, 1], [1, 0, 0, 0, 1]], 3 => [[0, 1, 0, 1, 1]], 1 => [[0, 1, 0, 1, 0], [0, 1, 1, 0, 0]]])
end
mis_overhead(::WTurn) = -1
function mapped_entry_to_compact(::Branch)
return Dict([0 => 0, 4 => 0, 5 => 5, 6 => 6, 2 => 0, 7 => 7, 3 => 3, 1 => 0])
end
function source_entry_to_configs(::Branch)
return Dict(Pair{Int64, Vector{BitVector}}[0 => [[0, 1, 0, 1, 0, 0, 1, 0]], 4 => [[0, 0, 1, 0, 0, 1, 0, 1], [0, 1, 0, 0, 0, 1, 0, 1], [0, 1, 0, 1, 0, 0, 0, 1]], 5 => [[1, 0, 1, 0, 0, 1, 0, 1]], 6 => [[0, 0, 1, 0, 1, 1, 0, 1], [0, 1, 0, 0, 1, 1, 0, 1]], 2 => [[0, 0, 1, 0, 1, 0, 1, 0], [0, 1, 0, 0, 1, 0, 1, 0], [0, 0, 1, 0, 1, 1, 0, 0], [0, 1, 0, 0, 1, 1, 0, 0]], 7 => [[1, 0, 1, 0, 1, 1, 0, 1]], 3 => [[1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 1, 0, 0]], 1 => [[1, 0, 1, 0, 0, 0, 1, 0], [1, 0, 1, 0, 0, 1, 0, 0], [1, 0, 0, 1, 0, 0, 1, 0]]])
end
mis_overhead(::Branch) = -1
function mapped_entry_to_compact(::BranchFix)
return Dict([0 => 0, 2 => 2, 3 => 1, 1 => 1])
end
function source_entry_to_configs(::BranchFix)
return Dict(Pair{Int64, Vector{BitVector}}[0 => [[0, 1, 0, 1, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 1, 0, 1, 0]], 2 => [[0, 1, 0, 1, 0, 1]], 3 => [[1, 0, 0, 1, 0, 1], [1, 0, 1, 0, 0, 1]], 1 => [[1, 0, 1, 0, 1, 0]]])
end
mis_overhead(::BranchFix) = -1
function mapped_entry_to_compact(::TrivialTurn)
return Dict([0 => 0, 2 => 2, 3 => 3, 1 => 1])
end
function source_entry_to_configs(::TrivialTurn)
return Dict(Pair{Int64, Vector{BitVector}}[0 => [[0, 0]], 2 => [[0, 1]], 3 => [], 1 => [[1, 0]]])
end
mis_overhead(::TrivialTurn) = 0
function mapped_entry_to_compact(::TCon)
return Dict([0 => 0, 4 => 0, 5 => 5, 6 => 6, 2 => 2, 7 => 7, 3 => 3, 1 => 0])
end
function source_entry_to_configs(::TCon)
return Dict(Pair{Int64, Vector{BitVector}}[0 => [[0, 0, 1, 0]], 4 => [[0, 0, 0, 1]], 5 => [[1, 0, 0, 1]], 6 => [[0, 1, 0, 1]], 2 => [[0, 1, 1, 0]], 7 => [], 3 => [], 1 => [[1, 0, 0, 0]]])
end
mis_overhead(::TCon) = 0
function mapped_entry_to_compact(::BranchFixB)
return Dict([0 => 0, 2 => 2, 3 => 3, 1 => 1])
end
function source_entry_to_configs(::BranchFixB)
return Dict(Pair{Int64, Vector{BitVector}}[0 => [[0, 0, 1, 0], [0, 1, 0, 0]], 2 => [[0, 0, 1, 1]], 3 => [[1, 0, 0, 1]], 1 => [[1, 1, 0, 0]]])
end
mis_overhead(::BranchFixB) = -1
function mapped_entry_to_compact(::EndTurn)
return Dict([0 => 0, 1 => 1])
end
function source_entry_to_configs(::EndTurn)
return Dict(Pair{Int64, Vector{BitVector}}[0 => [[0, 0, 1], [0, 1, 0]], 1 => [[1, 0, 1]]])
end
mis_overhead(::EndTurn) = -1
function mapped_entry_to_compact(::UnitDiskMapping.DanglingLeg)
return Dict([0 => 0, 1 => 1])
end
function source_entry_to_configs(::UnitDiskMapping.DanglingLeg)
return Dict(Pair{Int64, Vector{BitVector}}[0 => [[1, 0, 0], [0, 1, 0]], 1 => [[1, 0, 1]]])
end
mis_overhead(::UnitDiskMapping.DanglingLeg) = -1
| UnitDiskMapping | https://github.com/QuEraComputing/UnitDiskMapping.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 0736fd885e445c13b7b58e31d1ccc37a4950f7df | code | 11075 | """
### Provides
1. visualization of mapping
2. the script for generating backward mapping (project/createmap.jl)
3. the script for tikz visualization (project/vizgadget.jl)
"""
abstract type Pattern end
"""
### Properties
* size
* cross_location
* source: (locs, graph, pins/auto)
* mapped: (locs, graph/auto, pins/auto)
### Requires
1. equivalence in MIS-compact tropical tensor (you can check it with tests),
2. the size is <= [-2, 2] x [-2, 2] at the cross (not checked, requires cross offset information),
3. ancillas does not appear at the boundary (not checked),
"""
abstract type CrossPattern <: Pattern end
# Cell-matrix representation of the pattern's *source* (pre-mapping) graph;
# for connected patterns the boundary nodes listed by `connected_nodes` are
# additionally marked in their cells.
function source_matrix(p::Pattern)
    m, n = size(p)
    locs, _, _ = source_graph(p)
    a = locs2matrix(m, n, locs)
    if iscon(p)
        for i in connected_nodes(p)
            connect_cell!(a, locs[i]...)
        end
    end
    return a
end
# Cell-matrix representation of the pattern's *mapped* (post-mapping) graph.
function mapped_matrix(p::Pattern)
    nrow, ncol = size(p)
    locs, _, _ = mapped_graph(p)
    return locs2matrix(nrow, ncol, locs)
end
# Place a list of nodes into an otherwise-empty `m × n` cell matrix.
function locs2matrix(m, n, locs::AbstractVector{NT}) where NT <: Node
    mat = fill(empty(cell_type(NT)), m, n)
    foreach(loc -> add_cell!(mat, loc), locs)
    return mat
end
# True when the pattern's source matrix matches `matrix` with its upper-left
# corner placed at `(i, j)`; out-of-range cells compare as 0.
function Base.match(p::Pattern, matrix, i, j)
    pat = source_matrix(p)
    return all(CartesianIndices(pat)) do ci
        safe_get(matrix, i + ci.I[1] - 1, j + ci.I[2] - 1) == pat[ci]
    end
end
# True when the pattern's mapped matrix matches `matrix` at `(i, j)`.
function unmatch(p::Pattern, matrix, i, j)
    pat = mapped_matrix(p)
    return all(CartesianIndices(pat)) do ci
        safe_get(matrix, i + ci.I[1] - 1, j + ci.I[2] - 1) == pat[ci]
    end
end
# Bounds-tolerant read: return `matrix[i, j]`, or 0 when the index is outside
# the matrix.
function safe_get(matrix, i, j)
    checkbounds(Bool, matrix, i, j) || return 0
    return matrix[i, j]
end
# Bounds-tolerant write: assign inside the matrix; out-of-range writes are
# only legal for the zero value (asserted) and are dropped. Returns `val`.
function safe_set!(matrix, i, j, val)
    if checkbounds(Bool, matrix, i, j)
        matrix[i, j] = val
    else
        @assert val == 0
    end
    return val
end
Base.show(io::IO, ::MIME"text/plain", p::Pattern) = Base.show(io, p)
function Base.show(io::IO, p::Pattern)
print_grid(io, source_matrix(p))
println(io)
println(io, " "^(size(p)[2]-1) * "↓")
print_grid(io, mapped_matrix(p))
end
# Overwrite the region of `matrix` at `(i, j)` with the pattern's mapped
# matrix (forward rewrite). Returns `matrix`.
function apply_gadget!(p::Pattern, matrix, i, j)
    rewritten = mapped_matrix(p)
    for ci in CartesianIndices(rewritten)
        # e.g. the Truncated gadget requires safe set
        safe_set!(matrix, i + ci.I[1] - 1, j + ci.I[2] - 1, rewritten[ci])
    end
    return matrix
end
# Overwrite the region of `matrix` at `(i, j)` with the pattern's source
# matrix (inverse rewrite). Returns `matrix`.
function unapply_gadget!(p, matrix, i, j)
    original = source_matrix(p)
    for ci in CartesianIndices(original)
        # e.g. the Truncated gadget requires safe set
        safe_set!(matrix, i + ci.I[1] - 1, j + ci.I[2] - 1, original[ci])
    end
    return matrix
end
struct Cross{CON} <: CrossPattern end
iscon(::Cross{CON}) where {CON} = CON
# ⋅ ● ⋅
# ◆ ◉ ●
# ⋅ ◆ ⋅
function source_graph(::Cross{true})
locs = Node.([(2,1), (2,2), (2,3), (1,2), (2,2), (3,2)])
g = simplegraph([(1,2), (2,3), (4,5), (5,6), (1,6)])
return locs, g, [1,4,6,3]
end
# ⋅ ● ⋅
# ● ● ●
# ⋅ ● ⋅
function mapped_graph(::Cross{true})
locs = Node.([(2,1), (2,2), (2,3), (1,2), (3,2)])
locs, unitdisk_graph(locs, 1.5), [1,4,5,3]
end
Base.size(::Cross{true}) = (3, 3)
cross_location(::Cross{true}) = (2,2)
connected_nodes(::Cross{true}) = [1, 6]
# ⋅ ⋅ ● ⋅ ⋅
# ● ● ◉ ● ●
# ⋅ ⋅ ● ⋅ ⋅
# ⋅ ⋅ ● ⋅ ⋅
function source_graph(::Cross{false})
locs = Node.([(2,1), (2,2), (2,3), (2,4), (2,5), (1,3), (2,3), (3,3), (4,3)])
g = simplegraph([(1,2), (2,3), (3,4), (4,5), (6,7), (7,8), (8,9)])
return locs, g, [1,6,9,5]
end
# ⋅ ⋅ ● ⋅ ⋅
# ● ● ● ● ●
# ⋅ ● ● ● ⋅
# ⋅ ⋅ ● ⋅ ⋅
function mapped_graph(::Cross{false})
locs = Node.([(2,1), (2,2), (2,3), (2,4), (2,5), (1,3), (3,3), (4,3), (3, 2), (3,4)])
locs, unitdisk_graph(locs, 1.5), [1,6,8,5]
end
Base.size(::Cross{false}) = (4, 5)
cross_location(::Cross{false}) = (2,3)
struct Turn <: CrossPattern end
iscon(::Turn) = false
# ⋅ ● ⋅ ⋅
# ⋅ ● ⋅ ⋅
# ⋅ ● ● ●
# ⋅ ⋅ ⋅ ⋅
function source_graph(::Turn)
locs = Node.([(1,2), (2,2), (3,2), (3,3), (3,4)])
g = simplegraph([(1,2), (2,3), (3,4), (4,5)])
return locs, g, [1,5]
end
# ⋅ ● ⋅ ⋅
# ⋅ ⋅ ● ⋅
# ⋅ ⋅ ⋅ ●
# ⋅ ⋅ ⋅ ⋅
function mapped_graph(::Turn)
locs = Node.([(1,2), (2,3), (3,4)])
locs, unitdisk_graph(locs, 1.5), [1,3]
end
Base.size(::Turn) = (4, 4)
cross_location(::Turn) = (3,2)
struct Branch <: CrossPattern end
# ⋅ ● ⋅ ⋅
# ⋅ ● ⋅ ⋅
# ⋅ ● ● ●
# ⋅ ● ● ⋅
# ⋅ ● ⋅ ⋅
function source_graph(::Branch)
locs = Node.([(1,2), (2,2), (3,2),(3,3),(3,4),(4,3),(4,2),(5,2)])
g = simplegraph([(1,2), (2,3), (3, 4), (4,5), (4,6), (6,7), (7,8)])
return locs, g, [1, 5, 8]
end
# ⋅ ● ⋅ ⋅
# ⋅ ⋅ ● ⋅
# ⋅ ● ⋅ ●
# ⋅ ⋅ ● ⋅
# ⋅ ● ⋅ ⋅
function mapped_graph(::Branch)
locs = Node.([(1,2), (2,3), (3,2),(3,4),(4,3),(5,2)])
return locs, unitdisk_graph(locs, 1.5), [1,4,6]
end
Base.size(::Branch) = (5, 4)
cross_location(::Branch) = (3,2)
iscon(::Branch) = false
struct BranchFix <: CrossPattern end
# ⋅ ● ⋅ ⋅
# ⋅ ● ● ⋅
# ⋅ ● ● ⋅
# ⋅ ● ⋅ ⋅
function source_graph(::BranchFix)
locs = Node.([(1,2), (2,2), (2,3),(3,3),(3,2),(4,2)])
g = simplegraph([(1,2), (2,3), (3,4),(4,5), (5,6)])
return locs, g, [1, 6]
end
# ⋅ ● ⋅ ⋅
# ⋅ ● ⋅ ⋅
# ⋅ ● ⋅ ⋅
# ⋅ ● ⋅ ⋅
function mapped_graph(::BranchFix)
locs = Node.([(1,2),(2,2),(3,2),(4,2)])
return locs, unitdisk_graph(locs, 1.5), [1, 4]
end
Base.size(::BranchFix) = (4, 4)
cross_location(::BranchFix) = (2,2)
iscon(::BranchFix) = false
struct WTurn <: CrossPattern end
# ⋅ ⋅ ⋅ ⋅
# ⋅ ⋅ ● ●
# ⋅ ● ● ⋅
# ⋅ ● ⋅ ⋅
function source_graph(::WTurn)
locs = Node.([(2,3), (2,4), (3,2),(3,3),(4,2)])
g = simplegraph([(1,2), (1,4), (3,4),(3,5)])
return locs, g, [2, 5]
end
# ⋅ ⋅ ⋅ ⋅
# ⋅ ⋅ ⋅ ●
# ⋅ ⋅ ● ⋅
# ⋅ ● ⋅ ⋅
function mapped_graph(::WTurn)
locs = Node.([(2,4),(3,3),(4,2)])
return locs, unitdisk_graph(locs, 1.5), [1, 3]
end
Base.size(::WTurn) = (4, 4)
cross_location(::WTurn) = (2,2)
iscon(::WTurn) = false
struct BranchFixB <: CrossPattern end
# ⋅ ⋅ ⋅ ⋅
# ⋅ ⋅ ● ⋅
# ⋅ ● ● ⋅
# ⋅ ● ⋅ ⋅
function source_graph(::BranchFixB)
locs = Node.([(2,3),(3,2),(3,3),(4,2)])
g = simplegraph([(1,3), (2,3), (2,4)])
return locs, g, [1, 4]
end
# ⋅ ⋅ ⋅ ⋅
# ⋅ ⋅ ⋅ ⋅
# ⋅ ● ⋅ ⋅
# ⋅ ● ⋅ ⋅
function mapped_graph(::BranchFixB)
locs = Node.([(3,2),(4,2)])
return locs, unitdisk_graph(locs, 1.5), [1, 2]
end
Base.size(::BranchFixB) = (4, 4)
cross_location(::BranchFixB) = (2,2)
iscon(::BranchFixB) = false
struct TCon <: CrossPattern end
# ⋅ ◆ ⋅ ⋅
# ◆ ● ⋅ ⋅
# ⋅ ● ⋅ ⋅
function source_graph(::TCon)
locs = Node.([(1,2), (2,1), (2,2),(3,2)])
g = simplegraph([(1,2), (1,3), (3,4)])
return locs, g, [1,2,4]
end
connected_nodes(::TCon) = [1, 2]
# ⋅ ● ⋅ ⋅
# ● ⋅ ● ⋅
# ⋅ ● ⋅ ⋅
function mapped_graph(::TCon)
locs = Node.([(1,2),(2,1),(2,3),(3,2)])
return locs, unitdisk_graph(locs, 1.5), [1,2,4]
end
Base.size(::TCon) = (3,4)
cross_location(::TCon) = (2,2)
iscon(::TCon) = true
struct TrivialTurn <: CrossPattern end
# ⋅ ◆
# ◆ ⋅
function source_graph(::TrivialTurn)
locs = Node.([(1,2), (2,1)])
g = simplegraph([(1,2)])
return locs, g, [1,2]
end
# ⋅ ●
# ● ⋅
function mapped_graph(::TrivialTurn)
locs = Node.([(1,2),(2,1)])
return locs, unitdisk_graph(locs, 1.5), [1,2]
end
Base.size(::TrivialTurn) = (2,2)
cross_location(::TrivialTurn) = (2,2)
iscon(::TrivialTurn) = true
connected_nodes(::TrivialTurn) = [1, 2]
struct EndTurn <: CrossPattern end
# ⋅ ● ⋅ ⋅
# ⋅ ● ● ⋅
# ⋅ ⋅ ⋅ ⋅
function source_graph(::EndTurn)
locs = Node.([(1,2), (2,2), (2,3)])
g = simplegraph([(1,2), (2,3)])
return locs, g, [1]
end
# ⋅ ● ⋅ ⋅
# ⋅ ⋅ ⋅ ⋅
# ⋅ ⋅ ⋅ ⋅
function mapped_graph(::EndTurn)
locs = Node.([(1,2)])
return locs, unitdisk_graph(locs, 1.5), [1]
end
Base.size(::EndTurn) = (3,4)
cross_location(::EndTurn) = (2,2)
iscon(::EndTurn) = false
############## Rotation and Flip ###############
struct RotatedGadget{GT} <: Pattern
gadget::GT
n::Int
end
function Base.size(r::RotatedGadget)
m, n = size(r.gadget)
return r.n%2==0 ? (m, n) : (n, m)
end
struct ReflectedGadget{GT} <: Pattern
gadget::GT
mirror::String
end
function Base.size(r::ReflectedGadget)
m, n = size(r.gadget)
return r.mirror == "x" || r.mirror == "y" ? (m, n) : (n, m)
end
for T in [:RotatedGadget, :ReflectedGadget]
@eval function source_graph(r::$T)
locs, graph, pins = source_graph(r.gadget)
center = cross_location(r.gadget)
locs = map(loc->offset(loc, _get_offset(r)), _apply_transform.(Ref(r), locs, Ref(center)))
return locs, graph, pins
end
@eval function mapped_graph(r::$T)
locs, graph, pins = mapped_graph(r.gadget)
center = cross_location(r.gadget)
locs = map(loc->offset(loc, _get_offset(r)), _apply_transform.(Ref(r), locs, Ref(center)))
return locs, graph, pins
end
@eval cross_location(r::$T) = cross_location(r.gadget) .+ _get_offset(r)
@eval function _get_offset(r::$T)
m, n = size(r.gadget)
a, b = _apply_transform.(Ref(r), Node.([(1,1), (m,n)]), Ref(cross_location(r.gadget)))
return 1-min(a[1], b[1]), 1-min(a[2], b[2])
end
@eval iscon(r::$T) = iscon(r.gadget)
@eval connected_nodes(r::$T) = connected_nodes(r.gadget)
@eval vertex_overhead(p::$T) = vertex_overhead(p.gadget)
@eval function mapped_entry_to_compact(r::$T)
return mapped_entry_to_compact(r.gadget)
end
@eval function source_entry_to_configs(r::$T)
return source_entry_to_configs(r.gadget)
end
@eval mis_overhead(p::$T) = mis_overhead(p.gadget)
end
for T in [:RotatedGadget, :ReflectedGadget]
@eval _apply_transform(r::$T, node::Node, center) = chxy(node, _apply_transform(r, getxy(node), center))
end
# Rotate `loc` around `center` by 90° counter-clockwise, `r.n` times.
function _apply_transform(r::RotatedGadget, loc::Tuple{Int,Int}, center)
    result = loc
    for _ in 1:r.n
        result = rotate90(result, center)
    end
    return result
end
# Reflect `loc` around `center` across the axis selected by the gadget's
# `mirror` field ("x", "y", "diag" or "offdiag").
function _apply_transform(r::ReflectedGadget, loc::Tuple{Int,Int}, center)
    loc = if r.mirror == "x"
        reflectx(loc, center)
    elseif r.mirror == "y"
        reflecty(loc, center)
    elseif r.mirror == "diag"
        reflectdiag(loc, center)
    elseif r.mirror == "offdiag"
        reflectoffdiag(loc, center)
    else
        # BUG FIX: this message interpolated the non-existent field
        # `r.direction`, so an invalid mirror raised a field-access error
        # instead of the intended ArgumentError. The field is named `mirror`.
        throw(ArgumentError("reflection direction $(r.mirror) is not defined!"))
    end
    return loc
end
# Number of extra vertices introduced by the mapping.
# NOTE(review): `source_graph(p)[1]` is the *location list*, not the graph
# ([2]); `nv` on a vector looks suspicious — confirm a method exists or
# whether `[2]` was intended.
function vertex_overhead(p::Pattern)
    nv(mapped_graph(p)[2]) - nv(source_graph(p)[1])
end
# Encode the boundary (pin) configuration of the mapped graph as a bit mask.
function mapped_boundary_config(p::Pattern, config)
    _boundary_config(mapped_graph(p)[3], config)
end
# Encode the boundary (pin) configuration of the source graph as a bit mask.
function source_boundary_config(p::Pattern, config)
    _boundary_config(source_graph(p)[3], config)
end
# Pack the configuration bits at the pin sites into an integer: pin k
# contributes `config[pins[k]] << (k-1)`.
function _boundary_config(pins, config)
    mask = 0
    for (k, pin) in enumerate(pins)
        mask += Int(config[pin]) << (k - 1)
    end
    return mask
end
# All distinct orientations of pattern `p`: the identity, the three rotations
# and the four reflections, deduplicated by their source matrices.
function rotated_and_reflected(p::Pattern)
    patterns = Pattern[p]
    source_matrices = [source_matrix(p)]
    for pi in [[RotatedGadget(p, i) for i=1:3]..., [ReflectedGadget(p, axis) for axis in ["x", "y", "diag", "offdiag"]]...]
        m = source_matrix(pi)
        # keep only orientations whose source pattern is genuinely new
        if m ∉ source_matrices
            push!(patterns, pi)
            push!(source_matrices, m)
        end
    end
    return patterns
end
|
[
"Apache-2.0"
] | 0.4.0 | 0736fd885e445c13b7b58e31d1ccc37a4950f7df | code | 3804 | struct Gate{SYM} end
Gate(x::Symbol) = Gate{x}()
autosize(locs) = maximum(first, locs), maximum(last, locs)
function gate_gadget(::Gate{:NOT})
locs = [(1, 1), (3, 1)]
weights = fill(1, 2)
inputs, outputs = [1], [2]
return GridGraph(autosize(locs), Node.(locs, weights), 2.3), inputs, outputs
end
function gate_gadget(::Gate{:NOR})
locs = [(2, 1), (1, 3), (2, 5), (4, 2), (4, 4)]
weights = fill(1, 5)
inputs, outputs = [1, 3], [2]
return GridGraph(autosize(locs), Node.(locs, weights), 2.3), inputs, outputs
end
function gate_gadget(::Gate{:OR})
locs = [(3, 1), (2, 3), (3, 5), (5, 2), (5, 4), (1,3)]
weights = fill(1, 6)
weights[2] = 2
inputs, outputs = [1,3], [6]
return GridGraph(autosize(locs), Node.(locs, weights), 2.3), inputs, outputs
end
function gate_gadget(::Gate{:NXOR})
locs = [(2,1), (1,3), (2, 5), (3, 2), (3, 4), (4, 3)]
weights = [1, 2, 1, 2, 2, 1]
inputs, outputs = [1,3], [6]
return GridGraph(autosize(locs), Node.(locs, weights), 2.3), inputs, outputs
end
function gate_gadget(::Gate{:XOR})
locs = [(2,1), (1,3), (2, 5), (3, 2), (3, 4), (4, 3), (6, 3)]
weights = [1, 2, 1, 2, 2, 2, 1]
inputs, outputs = [1,3], [7]
return GridGraph(autosize(locs), Node.(locs, weights), 2.3), inputs, outputs
end
function gate_gadget(::Gate{:AND})
u = 1
locs = [(-3u, 0), (-u, 0), (0, -u), (0, u), (2u, -u), (2u, u)]
locs = map(loc->loc .+ (2u+1, u+1), locs)
weights = fill(1, 6)
weights[2] = 2
inputs, outputs = [1,6], [3]
return GridGraph(autosize(locs), Node.(locs, weights), 2.3), inputs, outputs
end
# inputs are mutually disconnected
# full adder
# Book-keeping structure for wiring gate gadgets into one circuit graph:
# `count` is the number of vertices allocated so far, `circuit` records the applied
# gates with pin indices, and `edges`/`weights` accumulate the combined graph.
struct VertexScheduler
    count::Base.RefValue{Int}
    circuit::Vector{Pair{Gate,Vector{Int}}}
    edges::Vector{Tuple{Int,Int}}
    weights::Vector{Int}
end
# Create an empty scheduler with no vertices allocated.
VertexScheduler() = VertexScheduler(Ref(0), Pair{Gate,Vector{Int}}[], Tuple{Int,Int}[], Int[])
# Allocate `k` fresh vertex ids with zero-initialized weights; return their id range.
function newvertices!(vs::VertexScheduler, k::Int=1)
    first_new = vs.count[] + 1
    vs.count[] = vs.count[] + k
    for _ in 1:k
        push!(vs.weights, 0)
    end
    return first_new:vs.count[]
end
# Splice the gadget of gate `g` into the scheduler, reusing existing vertices `a`
# and `b` as its two inputs; returns the vertex id assigned to the gate output.
# NOTE(review): `gadget(g)` is not defined in this file — `gate_gadget(g)` is, but it
# returns `(GridGraph, inputs, outputs)`, not the five values destructured below.
# Confirm this function is in sync with the gadget API before relying on it.
function apply!(g::Gate{SYM}, vs::VertexScheduler, a::Int, b::Int) where SYM
    locs, edges, weights, inputs, outputs = gadget(g)
    # allocate ids for all non-input nodes; the last one is the output
    vertices = newvertices!(vs, length(locs)-2)
    out = vertices[end]
    # map locations
    mapped_locs = zeros(Int, length(locs))
    mapped_locs[inputs] .= [a, b]
    mapped_locs[outputs] .= [out]
    mapped_locs[setdiff(1:length(locs), inputs ∪ outputs)] .= vertices[1:end-1]
    # map edges
    for (i, j) in edges
        push!(vs.edges, (mapped_locs[i], mapped_locs[j]))
    end
    # add weights
    vs.weights[mapped_locs] .+= weights
    # update circuit
    # NOTE(review): this records the gadget-local pin indices; presumably the mapped
    # vertex ids `[a, b, out]` were intended — verify against consumers of `circuit`.
    push!(vs.circuit, g => [inputs..., outputs...])
    return out
end
# Compose gate gadgets into a 1-bit multiply-accumulate unit computing
# x0 + x1*x2 + x3. Returns (scheduler, input vertices, [carry, sum] outputs).
function logicgate_multiplier()
    c = VertexScheduler()
    x0, x1, x2, x3 = newvertices!(c, 4)
    x12 = apply!(Gate(:AND), c, x1, x2)
    x4 = apply!(Gate(:XOR), c, x0, x12)
    x5 = apply!(Gate(:XOR), c, x3, x4) # 5 is sum
    x6 = apply!(Gate(:AND), c, x3, x4)
    x7 = apply!(Gate(:AND), c, x0, x12)
    x8 = apply!(Gate(:OR), c, x6, x7)  # x8 is the carry bit
    return c, [x0, x1, x2, x3], [x8, x5]
end
# Truth table of a gate, computed by enumerating maximum independent sets of its gadget.
truth_table(solver, gate::Gate) = truth_table(solver, gate_gadget(gate)...)
# `misenumerator(g, ws)` must return all maximum independent set configurations of
# graph `g` with vertex weights `ws`. Each configuration fixes the input pins to a
# bit pattern and determines the output pins; consistency is asserted across solutions.
function truth_table(misenumerator, grid_graph::GridGraph, inputs, outputs)
    g, ws = graph_and_weights(grid_graph)
    res = misenumerator(g, ws)
    # create a k-v pair
    d = Dict{Int,Int}()
    for config in res
        # pack input/output pin bits into integers (pin 1 = least significant bit)
        input = sum(i->config[inputs[i]]<<(i-1), 1:length(inputs))
        output = sum(i->config[outputs[i]]<<(i-1), 1:length(outputs))
        if !haskey(d,input)
            d[input] = output
        else
            @assert d[input] == output
        end
    end
    # every possible input pattern must be realized by some MIS
    @assert length(d) == 1<<length(inputs)
    return [d[i] for i=0:1<<length(inputs)-1]
end
|
[
"Apache-2.0"
] | 0.4.0 | 0736fd885e445c13b7b58e31d1ccc37a4950f7df | code | 15556 | # UnWeighted mode
# UnWeighted mode marker type
struct UnWeighted end
# Weighted mode marker type
struct Weighted end
# A cell of the mapping grid. `doubled` and `connected` are transient markers used
# while copy lines are laid down and before crossing gadgets are applied.
Base.@kwdef struct MCell{WT} <: AbstractCell{WT}
    occupied::Bool = true
    doubled::Bool = false
    connected::Bool = false
    weight::WT = ONE()
end
MCell(x::SimpleCell) = MCell(; occupied=x.occupied, weight=x.weight)
const UnWeightedMCell = MCell{ONE}
const WeightedMCell{T<:Real} = MCell{T}
Base.isempty(cell::MCell) = !cell.occupied
Base.empty(::Type{MCell{WT}}) where WT = MCell(occupied=false, weight=one(WT))
# Print one unweighted cell: ◉ doubled, ◆ connected, ● plain occupied, ⋅ empty.
function print_cell(io::IO, x::UnWeightedMCell; show_weight=false)
    glyph = if !x.occupied
        "⋅"
    elseif x.doubled
        "◉"
    elseif x.connected
        "◆"
    else
        "●"
    end
    print(io, glyph)
end
# Print one weighted cell. Glyphs encode occupancy state and weight:
# ◉ doubled(w=2), ◇ connected(w=1), ◆ connected(w=2), ▴/number w≥3,
# ● w=2, ○ w=1, ∅ w=0, ⋅ empty, ? any unexpected combination.
function print_cell(io::IO, x::WeightedMCell; show_weight=false)
    if x.occupied
        if x.doubled
            if x.weight == 2
                print(io, "◉")
            else
                print(io, "?")
            end
        elseif x.connected
            if x.weight == 1
                print(io, "◇")
            elseif x.weight == 2
                print(io, "◆")
            else
                print(io, "?")
            end
        elseif x.weight >= 3
            print(io, show_weight ? "$(x.weight)" : "▴")
        elseif x.weight == 2
            print(io, "●")
        elseif x.weight == 1
            print(io, "○")
        elseif x.weight == 0
            print(io, "∅")
        else
            print(io, "?")
        end
    else
        print(io, "⋅")
    end
end
# Grid state during embedding: the copy lines, the padding around the active
# area, and the matrix of cells being edited by the gadget rules.
struct MappingGrid{CT<:AbstractCell}
    lines::Vector{CopyLine}
    padding::Int
    content::Matrix{CT}
end
Base.:(==)(ug::MappingGrid{CT}, ug2::MappingGrid{CT}) where CT = ug.lines == ug2.lines && ug.content == ug2.content
Base.size(ug::MappingGrid, args...) = size(ug.content, args...)
padding(ug::MappingGrid) = ug.padding
# Coordinates of all occupied cells in the grid.
coordinates(ug::MappingGrid) = [ci.I for ci in findall(!isempty, ug.content)]
# Place a node on the grid; placing a second node on the same site marks it `doubled`.
function add_cell!(m::AbstractMatrix{<:MCell}, node::UnWeightedNode)
    i, j = node
    if isempty(m[i,j])
        m[i, j] = MCell()
    else
        # a site can be doubled at most once and must not already be connected
        @assert !(m[i, j].doubled) && !(m[i, j].connected)
        m[i, j] = MCell(doubled=true)
    end
end
# Mark an existing plain occupied cell as `connected` (used at edge crossings).
function connect_cell!(m::AbstractMatrix{<:MCell}, i::Int, j::Int)
    if m[i, j] !== MCell()
        error("can not connect at [$i,$j] of type $(m[i,j])")
    end
    m[i, j] = MCell(connected=true)
end
# Convert a finished mapping grid to a unit-disk graph (radius 1.5).
# Errors if any transient `doubled`/`connected` markers remain.
function Graphs.SimpleGraph(ug::MappingGrid)
    if any(x->x.doubled || x.connected, ug.content)
        error("This mapping is not done yet!")
    end
    return unitdisk_graph(coordinates(ug), 1.5)
end
# Convert a finished mapping grid to a `GridGraph`, carrying over cell weights.
function GridGraph(ug::MappingGrid)
    if any(x->x.doubled || x.connected, ug.content)
        error("This mapping is not done yet!")
    end
    return GridGraph(size(ug), [Node((i,j), ug.content[i,j].weight) for (i, j) in coordinates(ug)], 1.5)
end
Base.show(io::IO, ug::MappingGrid) = print_grid(io, ug.content)
Base.copy(ug::MappingGrid) = MappingGrid(ug.lines, ug.padding, copy(ug.content))
# TODO:
# 1. check if the resulting graph is a unit-disk
# 2. other simplification rules
# Gadgets used to resolve copy-line crossings, including rotated/reflected variants.
const crossing_ruleset = (Cross{false}(),
                    Turn(), WTurn(), Branch(), BranchFix(), TCon(), TrivialTurn(),
                    RotatedGadget(TCon(), 1), ReflectedGadget(Cross{true}(), "y"),
                    ReflectedGadget(TrivialTurn(), "y"), BranchFixB(), EndTurn(),
                    ReflectedGadget(RotatedGadget(TCon(), 1), "y"))
get_ruleset(::UnWeighted) = crossing_ruleset
# Replace every copy-line crossing by the first matching gadget from the ruleset.
# Returns the grid and a tape of `(pattern, x, y)` applications for later undoing.
function apply_crossing_gadgets!(mode, ug::MappingGrid)
    ruleset = get_ruleset(mode)
    tape = Tuple{Pattern,Int,Int}[]
    n = length(ug.lines)
    for j=1:n  # visit the crossing of every pair of copy lines
        for i=1:n
            for pattern in ruleset
                # top-left corner of the pattern window at this crossing
                x, y = crossat(ug, i, j) .- cross_location(pattern) .+ (1,1)
                if match(pattern, ug.content, x, y)
                    apply_gadget!(pattern, ug.content, x, y)
                    push!(tape, (pattern, x, y))
                    break
                end
            end
        end
    end
    return ug, tape
end
# Repeatedly sweep the grid applying simplification patterns wherever they match.
# Returns the grid and the application tape (for mapping configurations back).
function apply_simplifier_gadgets!(ug::MappingGrid; ruleset, nrepeat::Int=10)
    tape = Tuple{Pattern,Int,Int}[]
    for _ in 1:nrepeat, pattern in ruleset
        for j=0:size(ug, 2) # start from 0 because there can be one empty padding column/row.
            for i=0:size(ug, 1)
                if match(pattern, ug.content, i, j)
                    apply_gadget!(pattern, ug.content, i, j)
                    push!(tape, (pattern, i, j))
                end
            end
        end
    end
    return ug, tape
end
# Undo all recorded gadget applications in reverse order, translating each
# configuration through every unapplied gadget; finally collapse copy lines.
function unapply_gadgets!(ug::MappingGrid, tape, configurations)
    for (pattern, i, j) in Base.Iterators.reverse(tape)
        @assert unmatch(pattern, ug.content, i, j)
        for c in configurations
            map_config_back!(pattern, i, j, c)
        end
        unapply_gadget!(pattern, ug.content, i, j)
    end
    # reduce each copy line to a single source-vertex assignment
    cfgs = map(configurations) do c
        map_config_copyback!(ug, c)
    end
    return ug, cfgs
end
# returns a vector of configurations
# Translate a mapped-side boundary configuration into the equivalent set of
# source-side configurations, via the pattern's precomputed lookup tables.
function _map_config_back(s::Pattern, config)
    d1 = mapped_entry_to_compact(s)
    d2 = source_entry_to_configs(s)
    # get the pin configuration
    bc = mapped_boundary_config(s, config)
    return d2[d1[bc]]
end
# Rewrite the configuration inside the pattern window at (i, j) from the mapped
# gadget back to (one of) the equivalent source-gadget configurations.
function map_config_back!(p::Pattern, i, j, configuration)
    m, n = size(p)
    locs, graph, pins = mapped_graph(p)
    # configuration restricted to the mapped gadget's nodes
    config = [configuration[i+loc[1]-1, j+loc[2]-1] for loc in locs]
    # any of the equivalent source configurations is valid; pick one at random
    newconfig = rand(_map_config_back(p, config))
    # clear canvas
    for i_=i:i+m-1, j_=j:j+n-1
        safe_set!(configuration,i_,j_, 0)
    end
    locs0, graph0, pins0 = source_graph(p)
    for (k, loc) in enumerate(locs0)
        configuration[i+loc[1]-1,j+loc[2]-1] += newconfig[k]
    end
    return configuration
end
# Collapse each copy line of the grid configuration `c` into a single 0/1 value
# for the corresponding source vertex. A copy line of length `2k+1` contributes
# `k` to the MIS overhead, hence the `- length(locs) ÷ 2` at the end.
function map_config_copyback!(ug::MappingGrid, c::AbstractMatrix)
    res = zeros(Int, length(ug.lines))
    for line in ug.lines
        locs = copyline_locations(nodetype(ug), line; padding=ug.padding)
        count = 0
        for (iloc, loc) in enumerate(locs)
            gi, ci = ug.content[loc...], c[loc...]
            if gi.doubled
                # a doubled site holds two stacked nodes of the same copy line
                if ci == 2
                    count += 1
                elseif ci == 0
                    count += 0
                else # ci = 1
                    # a single occupation counts only if both neighbors are empty
                    if c[locs[iloc-1]...] == 0 && c[locs[iloc+1]...] == 0
                        count += 1
                    end
                end
            elseif !isempty(gi)
                count += ci
            else
                error("check your grid at location ($(locs...))!")
            end
        end
        res[line.vertex] = count - (length(locs) ÷ 2)
    end
    return res
end
# For each step of `vertex_order`, list the vertices whose entire neighborhood
# has been visited by then (so they can be removed). `addremove[i]` holds the
# vertices removable right after processing `vertex_order[i]`.
function remove_order(g::AbstractGraph, vertex_order::AbstractVector{Int})
    addremove = [Int[] for _=1:nv(g)]
    adjm = adjacency_matrix(g)
    counts = zeros(Int, nv(g))           # visited-neighbor count per vertex
    totalcounts = sum(adjm; dims=1)      # degree of each vertex
    removed = Int[]
    for (i, v) in enumerate(vertex_order)
        counts .+= adjm[:,v]
        for j=1:nv(g)
            # to avoid repeated remove!
            if j ∉ removed && counts[j] == totalcounts[j]
                # a vertex can be removed no earlier than its own visit
                push!(addremove[max(i, findfirst(==(j), vertex_order))], j)
                push!(removed, j)
            end
        end
    end
    return addremove
end
# Grid coordinates (row, col) of a copy line's center; the unit cell pitch is 4.
function center_location(tc::CopyLine; padding::Int)
    pitch = 4
    row = pitch * (tc.hslot - 1) + padding + 2
    col = pitch * (tc.vslot - 1) + padding + 1
    return row, col
end
# NT is node type
# Enumerate the grid nodes of one T-shaped copy line: a vertical segment through
# `hslot` from `vstart` to `vstop` plus a horizontal arm to `hstop`, ending with
# the center node whose weight equals the number of attached arms.
function copyline_locations(::Type{NT}, tc::CopyLine; padding::Int) where NT
    s = 4
    nline = 0
    I, J = center_location(tc; padding=padding)
    locations = NT[]
    # grow up
    start = I+s*(tc.vstart-tc.hslot)+1
    if tc.vstart < tc.hslot
        nline += 1
    end
    for i=I:-1:start # even number of nodes up
        push!(locations, node(NT, i, J, 1+(i!=start))) # half weight on last node
    end
    # grow down
    stop = I+s*(tc.vstop-tc.hslot)-1
    if tc.vstop > tc.hslot
        nline += 1
    end
    for i=I:stop # even number of nodes down
        if i == I
            push!(locations, node(NT, i+1, J+1, 2))
        else
            push!(locations, node(NT, i, J, 1+(i!=stop)))
        end
    end
    # grow right
    stop = J+s*(tc.hstop-tc.vslot)-1
    if tc.hstop > tc.vslot
        nline += 1
    end
    for j=J+2:stop # even number of nodes right
        push!(locations, node(NT, I, j, 1 + (j!=stop))) # half weight on last node
    end
    push!(locations, node(NT, I, J+1, nline)) # center node
    return locations
end
# Node/cell type mappings between grid cells and graph nodes.
nodetype(::MappingGrid{MCell{WT}}) where WT = Node{WT}
cell_type(::Type{Node{WT}}) where WT = MCell{WT}
nodetype(::UnWeighted) = UnWeightedNode
# Unweighted nodes discard the weight argument.
node(::Type{<:UnWeightedNode}, i, j, w) = Node(i, j)
# Lay out graph `g` on an empty grid: one T-shaped copy line per vertex (ordered
# by `vertex_order`), then mark connection sites for every edge crossing.
function ugrid(mode, g::SimpleGraph, vertex_order::AbstractVector{Int}; padding=2, nrow=nv(g))
    @assert padding >= 2
    # create an empty canvas
    n = nv(g)
    s = 4   # unit cell pitch
    N = (n-1)*s+1+2*padding
    M = nrow*s+1+2*padding
    u = fill(empty(mode isa Weighted ? MCell{Int} : MCell{ONE}), M, N)
    # add T-copies
    copylines = create_copylines(g, vertex_order)
    for tc in copylines
        for loc in copyline_locations(nodetype(mode), tc; padding=padding)
            add_cell!(u, loc)
        end
    end
    ug = MappingGrid(copylines, padding, u)
    # mark connected cells at each edge's crossing point
    for e in edges(g)
        I, J = crossat(ug, e.src, e.dst)
        connect_cell!(ug.content, I, J-1)
        if !isempty(ug.content[I-1, J])
            connect_cell!(ug.content, I-1, J)
        else
            connect_cell!(ug.content, I+1, J)
        end
    end
    return ug
end
# Grid coordinates where the copy lines of source vertices `v` and `w` cross:
# the horizontal slot of the earlier line meets the vertical slot of the later one.
function crossat(ug::MappingGrid, v, w)
    i, j = findfirst(x->x.vertex==v, ug.lines), findfirst(x->x.vertex==w, ug.lines)
    i, j = minmax(i, j)
    hslot = ug.lines[i].hslot
    s = 4
    return (hslot-1)*s+2+ug.padding, (j-1)*s+1+ug.padding
end
"""
    embed_graph([mode,] g::SimpleGraph; vertex_order=MinhThiTrick())

Embed graph `g` into a unit disk grid, where the optional argument `mode` can be `Weighted()` or `UnWeighted()`.
The `vertex_order` can be a vector or one of the following inputs

* `Greedy()` fast but non-optimal.
* `MinhThiTrick()` slow but optimal.
"""
embed_graph(g::SimpleGraph; vertex_order=MinhThiTrick()) = embed_graph(UnWeighted(), g; vertex_order)
function embed_graph(mode, g::SimpleGraph; vertex_order=MinhThiTrick())
    if vertex_order isa AbstractVector
        L = PathDecomposition.Layout(g, collect(vertex_order[end:-1:1]))
    else
        L = pathwidth(g, vertex_order)
    end
    # we reverse the vertex order of the pathwidth result,
    # because this order corresponds to the vertex-separation.
    ug = ugrid(mode, g, L.vertices[end:-1:1]; padding=2, nrow=L.vsep+1)
    return ug
end
# Total MIS-size overhead contributed by all copy lines of the grid.
function mis_overhead_copylines(ug::MappingGrid{WC}) where {WC}
    sum(ug.lines) do line
        mis_overhead_copyline(WC <: WeightedMCell ? Weighted() : UnWeighted(), line)
    end
end
# Overhead of a single copy line. In weighted mode it follows from the arm lengths
# directly; in unweighted mode it is half the (odd) number of line nodes.
function mis_overhead_copyline(w::W, line::CopyLine) where W
    if W === Weighted
        s = 4
        return (line.hslot - line.vstart) * s +
            (line.vstop - line.hslot) * s +
            max((line.hstop - line.vslot) * s - 2, 0)
    else
        locs = copyline_locations(nodetype(w), line; padding=2)
        @assert length(locs) % 2 == 1
        return length(locs) ÷ 2
    end
end
##### Interfaces ######
# Result of mapping a source graph to a grid graph: the mapped graph, the copy
# lines, the grid padding, the gadget application tape (needed for mapping
# solutions back) and the total MIS-size overhead.
struct MappingResult{NT}
    grid_graph::GridGraph{NT}
    lines::Vector{CopyLine}
    padding::Int
    mapping_history::Vector{Tuple{Pattern,Int,Int}}
    mis_overhead::Int
end
"""
    map_graph([mode=UnWeighted(),] g::SimpleGraph; vertex_order=MinhThiTrick(), ruleset=[...])

Map a graph to a unit disk grid graph that is "equivalent" to the original graph, and return a `MappingResult` instance.
Here "equivalent" means a maximum independent set in the grid graph can be mapped back to
a maximum independent set of the original graph in polynomial time.

Positional Arguments
-------------------------------------
* `mode` is optional, it can be `UnWeighted()` (default) or `Weighted()`.
* `g` is a graph instance, check the documentation of [`Graphs`](https://juliagraphs.org/Graphs.jl/dev/) for details.

Keyword Arguments
-------------------------------------
* `vertex_order` specifies the order finding algorithm for vertices.
Different vertex orders have different path width, i.e. different depth of mapped grid graph.
It can be a vector or one of the following inputs
    * `Greedy()` fast but not optimal.
    * `MinhThiTrick()` slow but optimal.
* `ruleset` specifies and extra set of optimization patterns (not the crossing patterns).
"""
function map_graph(g::SimpleGraph; vertex_order=MinhThiTrick(), ruleset=default_simplifier_ruleset(UnWeighted()))
    map_graph(UnWeighted(), g; ruleset=ruleset, vertex_order=vertex_order)
end
function map_graph(mode, g::SimpleGraph; vertex_order=MinhThiTrick(), ruleset=default_simplifier_ruleset(mode))
    # 1. lay out copy lines; 2. resolve crossings; 3. simplify; track overheads.
    ug = embed_graph(mode, g; vertex_order=vertex_order)
    mis_overhead0 = mis_overhead_copylines(ug)
    ug, tape = apply_crossing_gadgets!(mode, ug)
    ug, tape2 = apply_simplifier_gadgets!(ug; ruleset=ruleset)
    mis_overhead1 = isempty(tape) ? 0 : sum(x->mis_overhead(x[1]), tape)
    mis_overhead2 = isempty(tape2) ? 0 : sum(x->mis_overhead(x[1]), tape2)
    return MappingResult(GridGraph(ug), ug.lines, ug.padding, vcat(tape, tape2) , mis_overhead0 + mis_overhead1 + mis_overhead2)
end
"""
    map_configs_back(res::MappingResult, configs::AbstractVector)

Map MIS solutions for the mapped graph to a solution for the source graph.
"""
function map_configs_back(res::MappingResult, configs::AbstractVector)
    # scatter each flat configuration vector onto the grid before unmapping
    cs = map(configs) do cfg
        c = zeros(Int, size(res.grid_graph))
        for (i, n) in enumerate(res.grid_graph.nodes)
            c[n.loc...] = cfg[i]
        end
        c
    end
    return _map_configs_back(res, cs)
end
"""
    map_config_back(map_result, config)

Map a solution `config` for the mapped MIS problem to a solution for the source problem.
"""
function map_config_back(res::MappingResult, cfg)
    return map_configs_back(res, [cfg])[]
end
# Rebuild the mapping grid and replay the gadget tape in reverse on each config.
function _map_configs_back(r::MappingResult{UnWeightedNode}, configs::AbstractVector{<:AbstractMatrix})
    cm = cell_matrix(r.grid_graph)
    ug = MappingGrid(r.lines, r.padding, MCell.(cm))
    unapply_gadgets!(ug, r.mapping_history, copy.(configs))[2]
end
# All rotations and reflections of the simplifier patterns (weighted variants
# are derived from the unweighted ones).
default_simplifier_ruleset(::UnWeighted) = vcat([rotated_and_reflected(rule) for rule in simplifier_ruleset]...)
default_simplifier_ruleset(::Weighted) = weighted.(default_simplifier_ruleset(UnWeighted()))
# Print a configuration on the mapped grid to `stdout`.
print_config(mr::MappingResult, config::AbstractMatrix) = print_config(stdout, mr, config)
"""
    print_config([io, ]mr::MappingResult, config::AbstractMatrix)

Render configuration `config` on the grid of `mr`: `●` marks an occupied vertex,
`○` an unoccupied vertex and `⋅` an empty site. Errors if `config` sets a site
that carries no vertex.
"""
function print_config(io::IO, mr::MappingResult, config::AbstractMatrix)
    content = cell_matrix(mr.grid_graph)
    @assert size(content) == size(config)
    for i=1:size(content, 1)
        for j=1:size(content, 2)
            cell = content[i, j]
            if !isempty(cell)
                if !iszero(config[i,j])
                    print(io, "●")
                else
                    print(io, "○")
                end
            else
                if !iszero(config[i,j])
                    # bug fix: error message read "there is not vertex"
                    error("configuration not valid, there is no vertex at location $((i,j)).")
                end
                print(io, "⋅")
            end
            print(io, " ")
        end
        if i!=size(content, 1)
            println(io)
        end
    end
end
| UnitDiskMapping | https://github.com/QuEraComputing/UnitDiskMapping.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 0736fd885e445c13b7b58e31d1ccc37a4950f7df | code | 4262 | const multiplier_locs_and_weights = [
    # ((x, y) location, MIS weight) pairs for the multiplier gadget; the
    # `# xk` comments mark the eight boundary pins of the unit.
    ((0, -3), 1), # x0
    ((2, -1), 2),
    ((8, 0), 1),  # x1
    ((1, 1), 3),
    ((3, 1), 3),
    ((0, 2), 1),  # x7
    ((5, 2), 2),
    ((7, 2), 3),
    ((9, 2), 3),
    ((11, 2), 1), # x2
    ((1, 3), 3),
    ((3, 3), 3),
    ((7, 4), 4),
    ((9, 4), 3),
    ((1, 5), 2),
    ((3, 6), 2),
    ((5, 6), 2),
    ((8, 6), 2),
    ((2, 7), 3),
    ((4, 7), 2),
    ((8, 7), 2),
    ((0, 8), 1),  # x6
    ((11, 8), 1), # x3
    ((2, 9), 3),
    ((4, 9), 2),
    ((7, 9), 3),
    ((9, 9), 3),
    ((3, 10), 2),
    ((5, 10), 2),
    ((7, 10), 3),
    ((9, 10), 3),
    ((1, 11), 2),
    ((8, 12), 2),
    ((3, 13), 2),
    ((5, 14), 2),
    ((7, 14), 3),
    ((9, 14), 3),
    ((11, 15), 1), # x4
    ((7, 16), 3),
    ((9, 16), 3),
    ((8, 18), 1), # x5
]
"""
    multiplier()

Returns the multiplier as a `SimpleGridGraph` instance and a vector of `pins`.
The logic gate constraints on `pins` are

* x1 + x2*x3 + x4 == x5 + 2*x7
* x2 == x6
* x3 == x8
"""
function multiplier()
    # shift all locations so indices start at (1, 1); note rows come from the
    # second coordinate of the raw data and columns from the first
    xmin = minimum(x->x[1][1], multiplier_locs_and_weights)
    xmax = maximum(x->x[1][1], multiplier_locs_and_weights)
    ymin = minimum(x->x[1][2], multiplier_locs_and_weights)
    ymax = maximum(x->x[1][2], multiplier_locs_and_weights)
    nodes = [Node(loc[2]-ymin+1, loc[1]-xmin+1, w) for (loc, w) in multiplier_locs_and_weights]
    pins = [1,3,10,23,38,41,22,6]  # indices of x1..x8 in `nodes`
    return GridGraph((ymax-ymin+1, xmax-xmin+1), nodes, 2*sqrt(2)*1.01), pins
end
"""
    map_factoring(M::Int, N::Int)

Setup a factoring circuit with M-bit `q` register (second input) and N-bit `p` register (first input).
The `m` register size is (M+N-1), which stores the output.

Call [`solve_factoring`](@ref) to solve a factoring problem with the mapping result.
"""
function map_factoring(M::Int, N::Int)
    # tile an M×N array of multiplier blocks, glued with 4-row / 1-column overlap
    block, pin = multiplier()
    m, n = size(block) .- (4, 1)
    G = glue(fill(cell_matrix(block), (M,N)), 4, 1)
    WIDTH = 3
    # a left-hand column of wiring cells feeds carries between block rows
    leftside = zeros(eltype(G), size(G,1), WIDTH)
    for i=1:M-1
        for (a, b) in [(12, WIDTH), (14,1), (16,1), (18, 2), (14,1), (16,1), (18, 2), (19,WIDTH)]
            leftside[(i-1)*m+a, b] += SimpleCell(1)
        end
    end
    G = glue(reshape([leftside, G], 1, 2), 0, 1)
    gg = GridGraph(G, block.radius)
    locs = getfield.(gg.nodes, :loc)
    # locate the pins of block (i, j) inside the glued grid
    coo(i, j) = ((i-1)*m, (j-1)*n+WIDTH-1)
    pinloc(i, j, index) = findfirst(==(block.nodes[pin[index]].loc .+ coo(i, j)), locs)
    pp = [pinloc(1, j, 2) for j=N:-1:1]
    pq = [pinloc(i, N, 3) for i=1:M]
    pm = [
        [pinloc(i, N, 5) for i=1:M]...,
        [pinloc(M, j, 5) for j=N-1:-1:1]...,
        pinloc(M,1,7)
        ]
    p0 = [
        [pinloc(1, j, 1) for j=1:N]...,
        [pinloc(i, N, 4) for i=1:M]...,
    ]
    return FactoringResult(gg, pp, pq, pm, p0)
end
# Result of mapping a factoring circuit: the grid graph plus the pin indices of
# the two input registers, the output register, and the forced-zero pins.
struct FactoringResult{NT}
    grid_graph::GridGraph{NT}
    pins_input1::Vector{Int}
    pins_input2::Vector{Int}
    pins_output::Vector{Int}
    pins_zeros::Vector{Int}
end
# Read the two input registers (as integers) off a solved configuration.
function map_config_back(res::FactoringResult, cfg)
    return asint(cfg[res.pins_input1]), asint(cfg[res.pins_input2])
end
# Interpret a 0/1 vector as a little-endian binary integer (v[1] is the LSB).
function asint(v::AbstractVector)
    return sum(((i, b),) -> b << (i - 1), enumerate(v))
end
"""
    solve_factoring(missolver, mres::FactoringResult, x::Int) -> (Int, Int)

Solve a factoring problem by solving the mapped weighted MIS problem on a unit disk grid graph.
It returns (a, b) such that ``a b = x`` holds.
`missolver(graph, weights)` should return a vector of integers as the solution.
"""
function solve_factoring(missolver, mres::FactoringResult, target::Int)
    g, ws = graph_and_weights(mres.grid_graph)
    # pin the zero bits and the output register to `target`, removing fixed vertices
    mg, vmap = set_target(g, [mres.pins_zeros..., mres.pins_output...], target << length(mres.pins_zeros))
    res = missolver(mg, ws[vmap])
    # lift the reduced solution back onto the full vertex set
    cfg = zeros(Int, nv(g))
    cfg[vmap] .= res
    return map_config_back(mres, cfg)
end
# Fix each pin of `g` to the corresponding bit of `target`: a pin forced to 1 is
# removed together with its neighborhood (it must be in the MIS); a pin forced
# to 0 is simply removed. Returns the induced subgraph and the vertex map.
function set_target(g::SimpleGraph, pins::AbstractVector, target::Int)
    vs = collect(vertices(g))
    for (i, p) in enumerate(pins)
        bitval = (target >> (i-1)) & 1
        if bitval == 1
            # remove pin and its neighbor
            vs = setdiff(vs, neighbors(g, p) ∪ [p])
        else
            # remove pin
            vs = setdiff(vs, [p])
        end
    end
    return induced_subgraph(g, vs)
end
| UnitDiskMapping | https://github.com/QuEraComputing/UnitDiskMapping.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 0736fd885e445c13b7b58e31d1ccc37a4950f7df | code | 4480 | """
### Properties
* size
* source: (locs, graph/auto, pins/auto)
* mapped: (locs, graph/auto, pins/auto)

### Requires
1. equivalence in MIS-compact tropical tensor (you can check it with tests),
2. ancillas does not appear at the boundary (not checked),
"""
abstract type SimplifyPattern <: Pattern end
iscon(s::SimplifyPattern) = false
cross_location(s::SimplifyPattern) = size(s) .÷ 2
function source_locations end
function mapped_locations end
# Mapped-side graph of a simplify pattern: a unit-disk graph (radius 1.5) over
# its locations, with pins at the pattern window boundary.
function mapped_graph(p::SimplifyPattern)
    locs = mapped_locations(p)
    return locs, unitdisk_graph(locs, 1.5), vertices_on_boundary(locs, size(p)...)
end
# Source-side graph of a simplify pattern, same convention as `mapped_graph`.
function source_graph(p::SimplifyPattern)
    locs = source_locations(p)
    return locs, unitdisk_graph(locs, 1.5), vertices_on_boundary(locs, size(p)...)
end
# Indices of the locations that lie on the border of an m×n window.
function vertices_on_boundary(locs, m, n)
    return [k for (k, loc) in enumerate(locs) if loc[1] == 1 || loc[1] == m || loc[2] == 1 || loc[2] == n]
end
vertices_on_boundary(gg::GridGraph) = vertices_on_boundary(gg.nodes, gg.size...)
#################### Macros ###############################
# Parse an ASCII-art grid description into a `GridGraph`. In weighted mode,
# `@`/`●` give weight 2, `o`/`◯` weight 1, digits explicit weights; `.`/`⋅`
# are empty sites. In unweighted mode only `@`/`●` and `.`/`⋅` are allowed.
function gridgraphfromstring(mode::Union{Weighted, UnWeighted}, str::String; radius)
    item_array = Vector{Tuple{Bool,Int}}[]
    for line in split(str, "\n")
        items = [item for item in split(line, " ") if !isempty(item)]
        list = if mode isa Weighted # TODO: the weighted version need to be tested! Consider removing it!
            @assert all(item->item ∈ (".", "⋅", "@", "●", "o", "◯") || (length(item)==1 && isdigit(item[1])), items)
            map(items) do item
                if item ∈ ("@", "●")
                    true, 2
                elseif item ∈ ("o", "◯")
                    true, 1
                elseif item ∈ (".", "⋅")
                    false, 0
                else
                    true, parse(Int, item)
                end
            end
        else
            @assert all(item->item ∈ (".", "⋅", "@", "●"), items)
            map(items) do item
                item ∈ ("@", "●") ? (true, 1) : (false, 0)
            end
        end
        if !isempty(list)
            push!(item_array, list)
        end
    end
    # all rows must have the same number of columns
    @assert all(==(length(item_array[1])), length.(item_array))
    mat = permutedims(hcat(item_array...), (2,1))
    # generate GridGraph from matrix
    locs = [_to_node(mode, ci.I, mat[ci][2]) for ci in findall(first, mat)]
    return GridGraph(size(mat), locs, radius)
end
_to_node(::UnWeighted, loc::Tuple{Int,Int}, w::Int) = Node(loc)
_to_node(::Weighted, loc::Tuple{Int,Int}, w::Int) = Node(loc, w)
# Expand `name = "source art" => "mapped art"` into a `SimplifyPattern` subtype
# definition with `size`, `source_locations` and `mapped_locations` methods.
function gg_func(mode, expr)
    @assert expr.head == :(=)
    name = expr.args[1]
    pair = expr.args[2]
    @assert pair.head == :(call) && pair.args[1] == :(=>)
    g1 = gridgraphfromstring(mode, pair.args[2]; radius=1.5)
    g2 = gridgraphfromstring(mode, pair.args[3]; radius=1.5)
    # both sides must share the window size and identical boundary nodes
    @assert g1.size == g2.size
    @assert g1.nodes[vertices_on_boundary(g1)] == g2.nodes[vertices_on_boundary(g2)]
    return quote
        struct $(esc(name)) <: SimplifyPattern end
        Base.size(::$(esc(name))) = $(g1.size)
        $UnitDiskMapping.source_locations(::$(esc(name))) = $(g1.nodes)
        $UnitDiskMapping.mapped_locations(::$(esc(name))) = $(g2.nodes)
        $(esc(name))
    end
end
# Define an unweighted simplification pattern from two ASCII-art grids.
macro gg(expr)
    gg_func(UnWeighted(), expr)
end
# # How to add a new simplification rule
# 1. specify a gadget like the following. Use either `o` and `●` to specify a vertex,
# either `.` or `⋅` to specify a placeholder.
@gg DanglingLeg =
    """
    ⋅ ⋅ ⋅
    ⋅ ● ⋅
    ⋅ ● ⋅
    ⋅ ● ⋅
    """=>"""
    ⋅ ⋅ ⋅
    ⋅ ⋅ ⋅
    ⋅ ⋅ ⋅
    ⋅ ● ⋅
    """
# 2. add your gadget to simplifier ruleset.
const simplifier_ruleset = SimplifyPattern[DanglingLeg()]
# set centers (vertices with weight 1) for the weighted version
source_centers(::WeightedGadget{DanglingLeg}) = [(2,2)]
mapped_centers(::WeightedGadget{DanglingLeg}) = [(4,2)]
# 3. run the script `project/createmap` to generate `mis_overhead` and other informations required
# for mapping back. (Note: will overwrite the source file `src/extracting_results.jl`)
# simple rules for crossing gadgets
# Derive the weighted variant of each listed gadget: default node weight 2, with
# the listed source/mapped indices overridden to weight 1 (`s1`/`m1`) or 3 (`s3`/`m3`).
for (GT, s1, m1, s3, m3) in [
        (:(DanglingLeg), [1], [1], [], []),
        ]
    @eval function weighted(g::$GT)
        slocs, sg, spins = source_graph(g)
        mlocs, mg, mpins = mapped_graph(g)
        sw, mw = fill(2, length(slocs)), fill(2, length(mlocs))
        sw[$(s1)] .= 1
        sw[$(s3)] .= 3
        mw[$(m1)] .= 1
        mw[$(m3)] .= 3
        return weighted(g, sw, mw)
    end
end
| UnitDiskMapping | https://github.com/QuEraComputing/UnitDiskMapping.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 0736fd885e445c13b7b58e31d1ccc37a4950f7df | code | 1898 | function simplegraph(edgelist::AbstractVector{Tuple{Int,Int}})
    # Build a SimpleGraph from an edge list; the vertex count is the largest
    # endpoint index. (The local `nv` shadows `Graphs.nv` inside this function.)
    nv = maximum(x->max(x...), edgelist)
    g = SimpleGraph(nv)
    for (i,j) in edgelist
        add_edge!(g, i, j)
    end
    return g
end
# Lift each origin-based point transform to a two-argument overload that
# rotates/reflects `loc` about an arbitrary `center`.
for OP in [:rotate90, :reflectx, :reflecty, :reflectdiag, :reflectoffdiag]
    @eval function $OP(loc, center)
        dx, dy = $OP(loc .- center)
        return (center[1]+dx, center[2]+dy)
    end
end
# Point transforms about the origin for 2-tuples.
rotate90(loc) = (-loc[2], loc[1])        # (x, y) -> (-y, x)
reflectx(loc) = (loc[1], -loc[2])        # (x, y) -> (x, -y)
reflecty(loc) = (-loc[1], loc[2])        # (x, y) -> (-x, y)
reflectdiag(loc) = (-loc[2], -loc[1])    # (x, y) -> (-y, -x)
reflectoffdiag(loc) = (loc[2], loc[1])   # (x, y) -> (y, x)
# Unit-disk graph over `locs`: vertices are connected iff their Euclidean
# distance is strictly less than `unit`.
function unitdisk_graph(locs::AbstractVector, unit::Real)
    n = length(locs)
    g = SimpleGraph(n)
    for i=1:n, j=i+1:n
        if sum(abs2, locs[i] .- locs[j]) < unit ^ 2
            add_edge!(g, i, j)
        end
    end
    return g
end
# True iff no edge of `g` has both endpoints set to 1 in `config`.
function is_independent_set(g::SimpleGraph, config)
    for e in edges(g)
        if config[e.src] == config[e.dst] == 1
            return false
        end
    end
    return true
end
# Check whether `t1 .- t2` is a uniform constant, treating paired infinities as
# equal. Returns `(true, offset)` on success and `(false, 0)` otherwise.
function is_diff_by_const(t1::AbstractArray{T}, t2::AbstractArray{T}) where T <: Real
    offset = NaN
    for (u, v) in zip(t1, t2)
        if isinf(u) && isinf(v)
            continue
        end
        if isinf(u) || isinf(v)
            return false, 0
        end
        if isnan(offset)
            offset = u - v
        elseif offset != u - v
            return false, 0
        end
    end
    return true, offset
end
"""
    unit_disk_graph(locs::AbstractVector, unit::Real)

Create a unit disk graph with locations specified by `locs` and unit distance `unit`.
"""
# NOTE(review): byte-for-byte duplicate of the unexported `unitdisk_graph` above;
# consider delegating one to the other.
function unit_disk_graph(locs::AbstractVector, unit::Real)
    n = length(locs)
    g = SimpleGraph(n)
    for i=1:n, j=i+1:n
        if sum(abs2, locs[i] .- locs[j]) < unit ^ 2
            add_edge!(g, i, j)
        end
    end
    return g
end
| UnitDiskMapping | https://github.com/QuEraComputing/UnitDiskMapping.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 0736fd885e445c13b7b58e31d1ccc37a4950f7df | code | 4380 | # normalized to minimum weight and maximum weight
"""
    show_graph(gg::GridGraph; kwargs...)

Render a grid graph with LuxorGraphPlot: occupied sites are drawn as vertices
joined by the graph's unit-disk edges, and unoccupied grid sites as small dots.
Node texts default to vertex numbers when `show_number=true` and `texts` is not
given explicitly.
"""
function LuxorGraphPlot.show_graph(gg::GridGraph;
        format = :svg,
        filename = nothing,
        padding_left = 10,
        padding_right = 10,
        padding_top = 10,
        padding_bottom = 10,
        show_number = false,
        config = GraphDisplayConfig(),
        texts = nothing,
        vertex_colors=nothing,
    )
    texts !== nothing && show_number && @warn "not showing number due to the manually specified node texts."
    # plot!
    unit = 33.0   # pixels per grid unit
    coos = coordinates(gg)
    xmin, xmax = extrema(first.(coos))
    ymin, ymax = extrema(last.(coos))
    nodestore() do ns
        # grid row maps to -y so that row 1 is rendered at the top
        filledlocs = map(coo->circle!((unit * (coo[2] - 1), -unit * (coo[1] - 1)), config.vertex_size), coos)
        emptylocs, edges = [], []
        for i=xmin:xmax, j=ymin:ymax
            (i, j) ∉ coos && push!(emptylocs, circle!(((j-1) * unit, -(i-1) * unit), config.vertex_size/10))
        end
        for e in Graphs.edges(graph_and_weights(gg)[1])
            i, j = e.src, e.dst
            push!(edges, Connection(filledlocs[i], filledlocs[j]))
        end
        with_nodes(ns; format, filename, padding_bottom, padding_left, padding_right, padding_top, background=config.background) do
            # dedicated style for the empty-site markers
            # (removed a dead `config2 = copy(config)` that was immediately overwritten)
            config2 = GraphDisplayConfig(; vertex_color="#333333", vertex_stroke_color="transparent")
            texts = texts===nothing && show_number ? string.(1:length(filledlocs)) : texts
            LuxorGraphPlot.render_nodes(filledlocs, config; texts, vertex_colors)
            LuxorGraphPlot.render_edges(edges, config)
            LuxorGraphPlot.render_nodes(emptylocs, config2; texts=nothing)
        end
    end
end
# Render a grid graph with vertices colored by weight on a red-blue colormap
# (weight 0 at the middle of the scale, ±wmax at the ends).
function show_grayscale(gg::GridGraph; wmax=nothing, kwargs...)
    _, ws0 = graph_and_weights(gg)
    ws = tame_weights.(ws0)
    if wmax === nothing
        wmax = maximum(abs, ws)
    end
    cmap = Colors.colormap("RdBu", 200)
    # 0 -> 100
    # wmax -> 200
    # wmin -> 1
    vertex_colors= [cmap[max(1, round(Int, 100+w/wmax*100))] for w in ws]
    show_graph(gg; vertex_colors, kwargs...)
end
# Convert weights to plain floats; the unweighted marker `ONE` becomes 1.0.
tame_weights(w::ONE) = 1.0
tame_weights(w::Real) = w
# Render a grid graph with selected vertices highlighted; `color_pins` maps a
# vertex index to a `(color, label)` pair, all other vertices are white/unlabeled.
function show_pins(gg::GridGraph, color_pins::AbstractDict; kwargs...)
    vertex_colors=String[]
    texts=String[]
    for i=1:length(gg.nodes)
        c, t = haskey(color_pins, i) ? color_pins[i] : ("white", "")
        push!(vertex_colors, c)
        push!(texts, t)
    end
    show_graph(gg; vertex_colors, texts, kwargs...)
end
# Highlight the register pins of a factoring mapping: p (green), q (blue),
# output m (red) and forced-zero pins (gray), with subscripted labels.
function show_pins(mres::FactoringResult; kwargs...)
    color_pins = Dict{Int,Tuple{String,String}}()
    for (i, pin) in enumerate(mres.pins_input1)
        color_pins[pin] = ("green", "p$('₀'+i)")
    end
    for (i, pin) in enumerate(mres.pins_input2)
        color_pins[pin] = ("blue", "q$('₀'+i)")
    end
    for (i, pin) in enumerate(mres.pins_output)
        color_pins[pin] = ("red", "m$('₀'+i)")
    end
    for (i, pin) in enumerate(mres.pins_zeros)
        color_pins[pin] = ("gray", "0")
    end
    show_pins(mres.grid_graph, color_pins; kwargs...)
end
# The QUBO/WMIS mapping results share the same pin-highlighting logic.
for TP in [:QUBOResult, :WMISResult, :SquareQUBOResult]
    @eval function show_pins(mres::$TP; kwargs...)
        color_pins = Dict{Int,Tuple{String,String}}()
        for (i, pin) in enumerate(mres.pins)
            color_pins[pin] = ("red", "v$('₀'+i)")
        end
        show_pins(mres.grid_graph, color_pins; kwargs...)
    end
end
# Render a configuration: occupied vertices red, unoccupied white.
function show_config(gg::GridGraph, config; kwargs...)
    vertex_colors=[iszero(c) ? "white" : "red" for c in config]
    show_graph(gg; vertex_colors, kwargs...)
end
# Highlight the traced copy-line centers of a weighted mapping result.
function show_pins(mres::MappingResult{<:WeightedNode}; kwargs...)
    locs = getfield.(mres.grid_graph.nodes, :loc)
    center_indices = map(loc->findfirst(==(loc), locs), trace_centers(mres))
    color_pins = Dict{Int,Tuple{String,String}}()
    for (i, pin) in enumerate(center_indices)
        color_pins[pin] = ("red", "v$('₀'+i)")
    end
    show_pins(mres.grid_graph, color_pins; kwargs...)
end
# Highlight a gate gadget's input pins (red, x-labels) and output pins (blue, y-labels).
function show_pins(gate::Gate; kwargs...)
    grid_graph, inputs, outputs = gate_gadget(gate)
    color_pins = Dict{Int,Tuple{String,String}}()
    for (i, pin) in enumerate(inputs)
        color_pins[pin] = ("red", "x$('₀'+i)")
    end
    for (i, pin) in enumerate(outputs)
        color_pins[pin] = ("blue", "y$('₀'+i)")
    end
    show_pins(grid_graph, color_pins; kwargs...)
end
| UnitDiskMapping | https://github.com/QuEraComputing/UnitDiskMapping.jl.git |
|
[
"Apache-2.0"
] | 0.4.0 | 0736fd885e445c13b7b58e31d1ccc37a4950f7df | code | 6242 | struct WeightedGadget{GT, WT} <: Pattern
    # Wraps an unweighted gadget with explicit per-node weights on both sides.
    gadget::GT
    source_weights::Vector{WT}
    mapped_weights::Vector{WT}
end
# Weighted gadgets, possibly wrapped in a rotation or reflection.
const WeightedGadgetTypes = Union{WeightedGadget, RotatedGadget{<:WeightedGadget}, ReflectedGadget{<:WeightedGadget}}
# Weighted variant of `add_cell!`: doubling a site requires matching weights.
function add_cell!(m::AbstractMatrix{<:WeightedMCell}, node::WeightedNode)
    i, j = node
    if isempty(m[i,j])
        m[i, j] = MCell(weight=node.weight)
    else
        @assert !(m[i, j].doubled) && !(m[i, j].connected) && m[i,j].weight == node.weight
        m[i, j] = MCell(doubled=true, weight=node.weight)
    end
end
# Weighted variant of `connect_cell!`: the cell keeps its weight when marked connected.
function connect_cell!(m::AbstractMatrix{<:WeightedMCell}, i::Int, j::Int)
    if !m[i, j].occupied || m[i,j].doubled || m[i,j].connected
        error("can not connect at [$i,$j] of type $(m[i,j])")
    end
    m[i, j] = MCell(connected=true, weight=m[i,j].weight)
end
nodetype(::Weighted) = WeightedNode{Int}
node(::Type{<:WeightedNode}, i, j, w) = Node(i, j, w)
# `weighted`/`unweighted` convert between plain and weight-annotated gadgets;
# for rotated/reflected wrappers the conversion is applied to the inner gadget.
weighted(c::Pattern, source_weights, mapped_weights) = WeightedGadget(c, source_weights, mapped_weights)
unweighted(w::WeightedGadget) = w.gadget
weighted(r::RotatedGadget, source_weights, mapped_weights) = RotatedGadget(weighted(r.gadget, source_weights, mapped_weights), r.n)
weighted(r::ReflectedGadget, source_weights, mapped_weights) = ReflectedGadget(weighted(r.gadget, source_weights, mapped_weights), r.mirror)
weighted(r::RotatedGadget) = RotatedGadget(weighted(r.gadget), r.n)
weighted(r::ReflectedGadget) = ReflectedGadget(weighted(r.gadget), r.mirror)
unweighted(r::RotatedGadget) = RotatedGadget(unweighted(r.gadget), r.n)
unweighted(r::ReflectedGadget) = ReflectedGadget(unweighted(r.gadget), r.mirror)
function source_graph(r::WeightedGadget)
raw = unweighted(r)
locs, g, pins = source_graph(raw)
return [_mul_weight(loc, r.source_weights[i]) for (i, loc) in enumerate(locs)], g, pins
end
function mapped_graph(r::WeightedGadget)
raw = unweighted(r)
locs, g, pins = mapped_graph(raw)
return [_mul_weight(loc, r.mapped_weights[i]) for (i, loc) in enumerate(locs)], g, pins
end
_mul_weight(node::UnWeightedNode, factor) = Node(node..., factor)
for (T, centerloc) in [(:Turn, (2, 3)), (:Branch, (2, 3)), (:BranchFix, (3, 2)), (:BranchFixB, (3, 2)), (:WTurn, (3, 3)), (:EndTurn, (1, 2))]
@eval source_centers(::WeightedGadget{<:$T}) = [cross_location($T()) .+ (0, 1)]
@eval mapped_centers(::WeightedGadget{<:$T}) = [$centerloc]
end
# default to having no source center!
source_centers(::WeightedGadget) = Tuple{Int,Int}[]
mapped_centers(::WeightedGadget) = Tuple{Int,Int}[]
# For rotated/reflected weighted gadgets, transform the inner gadget's centers
# with the same rotation/reflection about the crossing point, then shift by the
# transform's offset.
for T in [:(RotatedGadget{<:WeightedGadget}), :(ReflectedGadget{<:WeightedGadget})]
    @eval function source_centers(r::$T)
        cross = cross_location(r.gadget)
        return map(loc->loc .+ _get_offset(r), _apply_transform.(Ref(r), source_centers(r.gadget), Ref(cross)))
    end
    @eval function mapped_centers(r::$T)
        cross = cross_location(r.gadget)
        return map(loc->loc .+ _get_offset(r), _apply_transform.(Ref(r), mapped_centers(r.gadget), Ref(cross)))
    end
end
# Geometry and connectivity queries simply forward to the unweighted gadget.
Base.size(r::WeightedGadget) = size(unweighted(r))
cross_location(r::WeightedGadget) = cross_location(unweighted(r))
iscon(r::WeightedGadget) = iscon(unweighted(r))
connected_nodes(r::WeightedGadget) = connected_nodes(unweighted(r))
vertex_overhead(r::WeightedGadget) = vertex_overhead(unweighted(r))
"""
    map_weights(r::MappingResult{<:WeightedNode}, source_weights::AbstractVector)

Map the vertex weights of the source graph to weights on the mapped grid graph,
returning one weight per node of `r.grid_graph`. Every source weight must lie
in `[0, 1]`; each is added onto the gadget weight at the traced center location
of the corresponding copy line.
"""
function map_weights(r::MappingResult{<:WeightedNode{T1}}, source_weights::AbstractVector{T}) where {T1,T}
    if !all(w -> 0 <= w <= 1, source_weights)
        error("all weights must be in range [0, 1], got: $(source_weights)")
    end
    # Start from the gadget weights, promoted to a common numeric type.
    weights = promote_type(T1,T).(getfield.(r.grid_graph.nodes, :weight))
    locs = getfield.(r.grid_graph.nodes, :loc)
    # `trace_centers` returns locations sorted by source vertex, so the i-th
    # source weight lands on the center of vertex i.
    center_indices = map(loc->findfirst(==(loc), locs), trace_centers(r))
    weights[center_indices] .+= source_weights
    return weights
end
# mapping configurations back
# Translate a node location from a gadget's source center to the matching
# mapped center; `offset` is the node position relative to the gadget's origin.
# Errors if `offset` matches none of the gadget's source centers.
function move_center(w::WeightedGadgetTypes, nodexy, offset)
    for (sc, mc) in zip(source_centers(w), mapped_centers(w))
        if offset == sc
            return nodexy .+ mc .- sc # found
        end
    end
    error("center not found, source center = $(source_centers(w)), while offset = $(offset)")
end
trace_centers(r::MappingResult) = trace_centers(r.lines, r.padding, r.mapping_history)
# Replay the gadget-application history (`tape`) to find where each copy line's
# weight center ended up on the final grid; returns locations sorted by source vertex.
function trace_centers(lines, padding, tape)
    # Initial center: one column to the right of each copy line's center.
    center_locations = map(x->center_location(x; padding) .+ (0, 1), lines)
    for (gadget, i, j) in tape
        m, n = size(gadget)
        for (k, centerloc) in enumerate(center_locations)
            # Position of this center relative to the applied gadget's origin.
            offset = centerloc .- (i-1,j-1)
            # Only centers inside the gadget's bounding box are moved.
            if 1<=offset[1] <= m && 1<=offset[2] <= n
                center_locations[k] = move_center(gadget, centerloc, offset)
            end
        end
    end
    return center_locations[sortperm(getfield.(lines, :vertex))]
end
# Read back one source-graph configuration per mapped configuration by sampling
# each mapped configuration at the traced center location of every copy line.
function _map_configs_back(r::MappingResult{<:WeightedNode}, configs::AbstractVector)
    center_locations = trace_centers(r)
    res = [zeros(Int, length(r.lines)) for i=1:length(configs)]
    for (ri, c) in zip(res, configs)
        for (i, loc) in enumerate(center_locations)
            ri[i] = c[loc...]
        end
    end
    return res
end
# simple rules for crossing gadgets
# Assign weights to each crossing pattern: default node weight is 2, with
# pattern-specific overrides — s1/m1 list the weight-1 sites and s3/m3 the
# weight-3 sites on the source/mapped side respectively.
for (GT, s1, m1, s3, m3) in [(:(Cross{true}), [], [], [], []), (:(Cross{false}), [], [], [], []),
                (:(WTurn), [], [], [], []), (:(BranchFix), [], [], [], []), (:(Turn), [], [], [], []),
                (:(TrivialTurn), [1, 2], [1, 2], [], []), (:(BranchFixB), [1], [1], [], []),
                (:(EndTurn), [3], [1], [], []), (:(TCon), [2], [2], [], []),
                (:(Branch), [], [], [4], [2]),
            ]
    @eval function weighted(g::$GT)
        slocs, sg, spins = source_graph(g)
        mlocs, mg, mpins = mapped_graph(g)
        sw, mw = fill(2, length(slocs)), fill(2, length(mlocs))
        sw[$(s1)] .= 1
        sw[$(s3)] .= 3
        mw[$(m1)] .= 1
        mw[$(m3)] .= 3
        return weighted(g, sw, mw)
    end
end
# The weighted crossing ruleset: every unweighted crossing rule with weights attached.
const crossing_ruleset_weighted = weighted.(crossing_ruleset)
# Weighted mapping mode uses the weighted ruleset.
get_ruleset(::Weighted) = crossing_ruleset_weighted
|
[
"Apache-2.0"
# Reference
# ----------------------------
# Coudert, D., Mazauric, D., & Nisse, N. (2014).
# Experimental evaluation of a branch and bound algorithm for computing pathwidth.
# https://doi.org/10.1007/978-3-319-07959-2_5
# Entry point: search for a vertex layout of minimum vertex separation, starting
# from the empty prefix, with the identity ordering as the initial upper bound
# and an empty memoization table for visited prefixes.
function branch_and_bound(G::AbstractGraph)
    branch_and_bound!(G, Layout(G, Int[]), Layout(G, collect(vertices(G))), Dict{Layout{Int},Bool}())
end
# P is the prefix layout under construction, L is the best complete layout found
# so far (the upper bound), and vP memoizes prefixes already explored so they
# are not expanded twice.
function branch_and_bound!(G::AbstractGraph, P::Layout, L::Layout, vP::Dict)
    V = collect(vertices(G))
    # Expand P only if it can still beat the incumbent and has not been seen.
    if (vsep(P) < vsep(L)) && !haskey(vP, P)
        # Greedily extend P with "exact" moves that never hurt the separation.
        P2 = greedy_exact(G, P)
        vsep_P2 = vsep(P2)
        if sort(vertices(P2)) == V && vsep_P2 < vsep(L)
            # P2 is a complete, strictly better layout: new incumbent.
            return P2
        else
            current = vsep(L)
            remaining = vcat(P2.neighbors, P2.disconnected)
            # Branch on the remaining vertices in order of increasing updated separation.
            vsep_order = sortperm([vsep_updated(G, P2, x) for x in remaining])
            for v in remaining[vsep_order] # by increasing values of vsep(P2 ⊙ v)
                if vsep_updated(G, P2, v) < vsep(L)
                    L3 = branch_and_bound!(G, ⊙(G, P2, v), L, vP)
                    if vsep(L3) < vsep(L)
                        L = L3
                    end
                end
            end
            # update Layout table: record whether this prefix turned out useless
            # (it produced no improvement matching its own separation).
            vP[P] = !(vsep(L) < current && vsep(P) == vsep(L))
        end
    end
    return L
end
|
[
"Apache-2.0"
# Repeatedly apply "exact" (never harmful) extension rules to the prefix P:
#   1. append any vertex whose entire neighborhood already lies in the prefix
#      or on its boundary (this cannot increase the separation), and
#   2. append any boundary vertex with exactly one uncovered neighbor.
# Iterate until a fixed point is reached; returns the extended layout.
function greedy_exact(G::AbstractGraph, P)
    keepgoing = true
    while keepgoing
        keepgoing = false
        for list in (P.disconnected, P.neighbors)
            for v in list
                if all(nb->nb ∈ P.vertices || nb ∈ P.neighbors, neighbors(G, v))
                    P = ⊙(G, P, v)
                    keepgoing = true
                end
            end
        end
        for v in P.neighbors
            if count(nb -> nb ∉ P.vertices && nb ∉ P.neighbors, neighbors(G, v)) == 1
                P = ⊙(G, P, v)
                keepgoing = true
            end
        end
    end
    return P
end
# Build a complete layout greedily: apply the exact rules until stuck, then take
# the best single-vertex extension (preferring boundary vertices over
# disconnected ones), until every vertex is placed.
function greedy_decompose(G::AbstractGraph)
    P = Layout(G, Int[])
    while true
        P = greedy_exact(G, P)
        if !isempty(P.neighbors)
            P = greedy_step(G, P, P.neighbors)
        elseif !isempty(P.disconnected)
            P = greedy_step(G, P, P.disconnected)
        else
            break
        end
    end
    return P
end
# Extend P by one vertex from `list`, choosing uniformly at random among the
# extensions with the smallest vertex separation.
function greedy_step(G, P, list)
    candidates = [⊙(G, P, v) for v in list]
    separations = map(vsep, candidates)
    ties = findall(==(minimum(separations)), separations)
    return candidates[rand(ties)]
end
|
[
"Apache-2.0"
module PathDecomposition
using Graphs
export pathwidth, PathDecompositionMethod, MinhThiTrick, Greedy
# A (partial) linear layout of a graph's vertices.
struct Layout{T}
    vertices::Vector{T}      # the ordered prefix of placed vertices
    vsep::Int                # maximum vertex separation over all prefixes so far
    neighbors::Vector{T}     # unplaced vertices adjacent to the prefix (the boundary)
    disconnected::Vector{T}  # unplaced vertices not adjacent to the prefix
end
Base.hash(layout::Layout) = hash(layout.vertices)
# Two layouts are equal iff they have the same separation and vertex order.
function Base.:(==)(l::Layout, m::Layout)
    l.vsep == m.vsep && l.vertices == m.vertices
end
# Construct a layout from a vertex prefix, computing its separation, its
# boundary, and the remaining disconnected vertices.
function Layout(g::SimpleGraph, vertices)
    vs, nbs = vsep_and_neighbors(g, vertices)
    Layout(vertices, vs, nbs, setdiff(1:nv(g), nbs ∪ vertices))
end
# Compute the vertex separation of the ordered prefix `vertices` (the maximum,
# over all prefixes, of the number of outside vertices adjacent to the prefix)
# together with the boundary of the full prefix.
function vsep_and_neighbors(G::SimpleGraph, vertices::AbstractVector{T}) where T
    vs, nbs = 0, T[]
    for i = 1:length(vertices)
        # Use a view to avoid copying the prefix on every iteration.
        S = view(vertices, 1:i)
        nbs = [v for v in setdiff(Graphs.vertices(G), S) if any(s->has_edge(G, v, s), S)]
        vs = max(vs, length(nbs))
    end
    return vs, nbs
end
# Separation of the whole layout.
vsep(layout::Layout) = layout.vsep
# Separation contributed by the current prefix alone (its boundary size).
vsep_last(layout::Layout) = length(layout.neighbors)
# Separation of the layout after appending `v`, computed without materializing it.
function vsep_updated(G::SimpleGraph, layout::Layout{T}, v::T) where T
    vs = vsep_last(layout)
    # `v` leaves the boundary when it is placed...
    if v ∈ layout.neighbors
        vs -= 1
    end
    # ...and its previously-uncovered neighbors join the boundary.
    for w in neighbors(G, v)
        if w ∉ layout.vertices && w ∉ layout.neighbors
            vs += 1
        end
    end
    vs = max(vs, layout.vsep)
    return vs
end
# Same as `vsep_updated`, but also return the updated boundary and disconnected
# lists (used when actually appending `v` in `⊙`).
function vsep_updated_neighbors(G::SimpleGraph, layout::Layout{T}, v::T) where T
    vs = vsep_last(layout)
    nbs = copy(layout.neighbors)
    disc = copy(layout.disconnected)
    if v ∈ nbs
        deleteat!(nbs, findfirst(==(v), nbs))
        vs -= 1
    else
        # `v` was not adjacent to the prefix, so it must be disconnected.
        deleteat!(disc, findfirst(==(v), disc))
    end
    for w in neighbors(G, v)
        # A fresh neighbor moves from `disconnected` onto the boundary.
        if w ∉ layout.vertices && w ∉ nbs
            vs += 1
            push!(nbs, w)
            deleteat!(disc, findfirst(==(w), disc))
        end
    end
    vs = max(vs, layout.vsep)
    return vs, nbs, disc
end
# update the Layout by a single vertex
# `⊙(G, layout, v)` returns the layout obtained by appending `v` to the prefix.
function ⊙(G::SimpleGraph, layout::Layout{T}, v::T) where T
    vertices = [layout.vertices..., v]
    vs_new, neighbors_new, disconnected = vsep_updated_neighbors(G, layout, v)
    # NOTE(review): `vsep_updated_neighbors` already folds in `layout.vsep`,
    # so this `max` is a harmless no-op kept for safety.
    vs_new = max(layout.vsep, vs_new)
    return Layout(vertices, vs_new, neighbors_new, disconnected)
end
Graphs.vertices(layout::Layout) = layout.vertices
##### Interfaces #####
abstract type PathDecompositionMethod end
"""
    MinhThiTrick <: PathDecompositionMethod

A path decomposition method based on the branch-and-bound method.

In memory of Minh-Thi Nguyen, one of the main developers of this method.
She left us in a truck accident at her 24 years old.
- https://www.cbsnews.com/boston/news/cyclist-killed-minh-thi-nguyen-cambridge-bike-safety/
"""
struct MinhThiTrick <: PathDecompositionMethod end
"""
    Greedy <: PathDecompositionMethod

A path decomposition method based on the greedy method, taking the best of
`nrepeat` randomized runs.
"""
Base.@kwdef struct Greedy <: PathDecompositionMethod
    nrepeat::Int = 10
end
"""
    pathwidth(g::AbstractGraph, method)

Compute the optimal path decomposition of graph `g`, returns a `Layout` instance.
`method` can be
* `Greedy(; nrepeat=10)`
* `MinhThiTrick()`
"""
function pathwidth(g::AbstractGraph, ::MinhThiTrick)
    return branch_and_bound(g)
end
function pathwidth(g::AbstractGraph, method::Greedy)
    res = Layout{Int}[]
    for _ = 1:method.nrepeat
        push!(res, greedy_decompose(g))
    end
    # Keep the run with the smallest vertex separation.
    return res[argmin(vsep.(res))]
end
include("greedy.jl")
include("branching.jl")
end
using .PathDecomposition
|
[
"Apache-2.0"
using UnitDiskMapping, Test, Graphs
@testset "crossing lattice" begin
    d = UnitDiskMapping.crossing_lattice(complete_graph(10), 1:10)
    @test size(d) == (10,10)
    # Corner block of the crossing lattice of K10.
    @test d[1,1] == UnitDiskMapping.Block(-1, -1, -1, 1, -1)
    println(d)
end
|
[
"Apache-2.0"
using Test, UnitDiskMapping
using GenericTensorNetworks, Graphs
using GenericTensorNetworks.OMEinsum.LinearAlgebra: triu
using Random
@testset "qubo" begin
    # Seed the RNG so the randomized instance is deterministic, consistent
    # with the "square qubo" testset below.
    Random.seed!(2)
    n = 7
    H = -randn(n) * 0.05
    J = triu(randn(n, n) * 0.001, 1); J += J'
    qubo = UnitDiskMapping.map_qubo(J, H)
    @test show_pins(qubo) !== nothing
    println(qubo)
    graph, weights = UnitDiskMapping.graph_and_weights(qubo.grid_graph)
    r1 = solve(GenericTensorNetwork(IndependentSet(graph, weights)), SingleConfigMax())[]
    # Flatten the strict upper triangle of J into the coupling vector of K_n.
    J2 = vcat([Float64[J[i,j] for j=i+1:n] for i=1:n]...)
    # note the different sign convention of J
    r2 = solve(GenericTensorNetwork(SpinGlass(complete_graph(n), -J2, H)), SingleConfigMax())[]
    @test r1.n - qubo.mis_overhead ≈ r2.n
    @test r1.n % 1 ≈ r2.n % 1
    c1 = map_config_back(qubo, r1.c.data)
    @test spinglass_energy(complete_graph(n), c1; J=-J2, h=H) ≈ spinglass_energy(complete_graph(n), r2.c.data; J=-J2, h=H)
    #display(MappingGrid(UnitDiskMapping.CopyLine[], 0, qubo))
end
@testset "simple wmis" begin
    for graphname in [:petersen, :bull, :cubical, :house, :diamond]
        @show graphname
        g0 = smallgraph(graphname)
        n = nv(g0)
        # Uniform small weights in (0, 1).
        w0 = ones(n) * 0.01
        wmis = UnitDiskMapping.map_simple_wmis(g0, w0)
        graph, weights = UnitDiskMapping.graph_and_weights(wmis.grid_graph)
        r1 = solve(GenericTensorNetwork(IndependentSet(graph, weights)), SingleConfigMax())[]
        r2 = solve(GenericTensorNetwork(IndependentSet(g0, w0)), SingleConfigMax())[]
        # The mapped optimum differs from the source optimum by a known overhead;
        # the fractional parts must agree exactly.
        @test r1.n - wmis.mis_overhead ≈ r2.n
        @test r1.n % 1 ≈ r2.n % 1
        c1 = map_config_back(wmis, r1.c.data)
        @test sum(c1 .* w0) == r2.n
    end
end
@testset "restricted qubo" begin
    # Seed the RNG so the random couplings are deterministic.
    Random.seed!(2)
    n = 5
    coupling = [
        [(i,j,i,j+1,rand([-1,1])) for i=1:n, j=1:n-1]...,
        [(i,j,i+1,j,rand([-1,1])) for i=1:n-1, j=1:n]...
    ]
    qubo = UnitDiskMapping.map_qubo_restricted(coupling)
    graph, weights = UnitDiskMapping.graph_and_weights(qubo.grid_graph)
    r1 = solve(GenericTensorNetwork(IndependentSet(graph, weights)), SingleConfigMax())[]
    # Solve the equivalent spin glass directly for comparison.
    weights = Int[]
    g2 = SimpleGraph(n*n)
    for (i,j,i2,j2,J) in coupling
        add_edge!(g2, (i-1)*n+j, (i2-1)*n+j2)
        push!(weights, J)
    end
    r2 = solve(GenericTensorNetwork(SpinGlass(g2, -weights)), SingleConfigMax())[]
    # NOTE(review): this set only prints the two optima and asserts nothing.
    # Add a relation such as `r1.n - qubo.mis_overhead ≈ r2.n` once the
    # expected offset for the restricted mapping is confirmed.
    @show r1, r2
end
@testset "square qubo" begin
    Random.seed!(4)
    m, n = 6, 6
    # Nearest-neighbor couplings on an m×n grid plus on-site fields.
    coupling = [
        [(i,j,i,j+1,0.01*randn()) for i=1:m, j=1:n-1]...,
        [(i,j,i+1,j,0.01*randn()) for i=1:m-1, j=1:n]...
    ]
    onsite = vec([(i, j, 0.01*randn()) for i=1:m, j=1:n])
    qubo = UnitDiskMapping.map_qubo_square(coupling, onsite)
    graph, weights = UnitDiskMapping.graph_and_weights(qubo.grid_graph)
    r1 = solve(GenericTensorNetwork(IndependentSet(graph, weights)), SingleConfigMax())[]
    # solve spin glass directly
    g2 = SimpleGraph(m*n)
    Jd = Dict{Tuple{Int,Int}, Float64}()
    for (i,j,i2,j2,J) in coupling
        # Column-major linear index of grid site (i, j).
        edg = (i+(j-1)*m, i2+(j2-1)*m)
        Jd[edg] = J
        add_edge!(g2, edg...)
    end
    # Couplings must follow the edge iteration order of `g2`.
    Js, hs = Float64[], zeros(Float64, nv(g2))
    for e in edges(g2)
        push!(Js, Jd[(e.src, e.dst)])
    end
    for (i,j,h) in onsite
        hs[i+(j-1)*m] = h
    end
    r2 = solve(GenericTensorNetwork(SpinGlass(g2, -Js, hs)), SingleConfigMax())[]
    @test r1.n - qubo.mis_overhead ≈ r2.n
    c1 = map_config_back(qubo, collect(Int,r1.c.data))
    c2 = collect(r2.c.data)
    # Both configurations must reach the same ground-state energy.
    @test spinglass_energy(g2, c1; J=-Js, h=hs) ≈ spinglass_energy(g2, c2; J=-Js, h=hs)
end
|
[
"Apache-2.0"
using UnitDiskMapping, Test
using GenericTensorNetworks
@testset "map results back" begin
    for s in [UnitDiskMapping.crossing_ruleset..., UnitDiskMapping.simplifier_ruleset...]
        _, g0, pins0 = source_graph(s)
        locs, g, pins = mapped_graph(s)
        # d1: mapped boundary config -> compact config;
        # d2: compact config -> source configurations.
        d1 = UnitDiskMapping.mapped_entry_to_compact(s)
        d2 = UnitDiskMapping.source_entry_to_configs(s)
        m = solve(GenericTensorNetwork(IndependentSet(g), openvertices=pins), ConfigsMax())
        t = solve(GenericTensorNetwork(IndependentSet(g0), openvertices=pins0), SizeMax())
        for i in eachindex(m)
            for v in m[i].c.data
                # Every optimal mapped configuration must map back to source
                # configurations with the matching boundary and optimal size.
                bc = UnitDiskMapping.mapped_boundary_config(s, v)
                compact_bc = d1[bc]
                sc = d2[compact_bc]
                for sbc in sc
                    ss = UnitDiskMapping.source_boundary_config(s, sbc)
                    @test ss == compact_bc
                    @test count(isone, sbc) == Int(t[compact_bc+1].n)
                end
            end
        end
    end
end
|
[
"Apache-2.0"
using UnitDiskMapping, Test
using GenericTensorNetworks
using GenericTensorNetworks: content
using Graphs
@testset "gadgets" begin
    for s in [UnitDiskMapping.crossing_ruleset..., UnitDiskMapping.simplifier_ruleset...]
        println("Testing gadget:\n$s")
        locs1, g1, pins1 = source_graph(s)
        locs2, g2, pins2 = mapped_graph(s)
        # Use @test (not @assert) so a failure is reported through the test
        # framework instead of aborting the run; @assert may also be disabled
        # at higher optimization levels.
        @test length(locs1) == nv(g1)
        m1 = mis_compactify!(solve(GenericTensorNetwork(IndependentSet(g1), openvertices=pins1), SizeMax()))
        m2 = mis_compactify!(solve(GenericTensorNetwork(IndependentSet(g2), openvertices=pins2), SizeMax()))
        @test nv(g1) == length(locs1) && nv(g2) == length(locs2)
        # Source and mapped MIS sizes must differ by exactly the gadget overhead
        # for every boundary configuration.
        sig, diff = UnitDiskMapping.is_diff_by_const(content.(m1), content.(m2))
        @test diff == -mis_overhead(s)
        @test sig
    end
end
@testset "rotated_and_reflected" begin
    # Patterns with a reflection symmetry yield 4 distinct variants;
    # asymmetric patterns yield all 8.
    @test length(rotated_and_reflected(UnitDiskMapping.DanglingLeg())) == 4
    @test length(rotated_and_reflected(Cross{false}())) == 4
    @test length(rotated_and_reflected(Cross{true}())) == 4
    @test length(rotated_and_reflected(BranchFixB())) == 8
end
|
[
"Apache-2.0"
using UnitDiskMapping, Test
using GenericTensorNetworks
@testset "gates" begin
    for (f, gate) in [(!, :NOT), (⊻, :XOR), ((a, b)->a && b, :AND),
            ((a, b)->a || b, :OR), ((a, b)->!(a || b), :NOR), ((a, b)->!(a ⊻ b), :NXOR)]
        @info gate
        g, inputs, outputs = gate_gadget(Gate(gate))
        # The truth table extracted from the gadget's maximum independent sets
        # must match the Boolean function on every input combination
        # (input x encodes the bits of all input pins).
        @test UnitDiskMapping.truth_table(Gate(gate)) do graph, weights
            collect.(Int, solve(GenericTensorNetwork(IndependentSet(graph, weights)), ConfigsMax())[].c.data)
        end == [f([x>>(i-1) & 1 == 1 for i=1:length(inputs)]...) for x in 0:1<<length(inputs)-1]
    end
end
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.