licenses (sequence, length 1–3) | version (string, 677 classes) | tree_hash (string, length 40) | path (string, 1 class) | type (string, 2 classes) | size (string, length 2–8) | text (string, length 25–67.1M) | package_name (string, length 2–41) | repo (string, length 33–86) |
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.1.0 | cb40ca3d7a82c1b5e767e9f099b4a85083a73f69 | code | 595 | using Indicomb
using Documenter
DocMeta.setdocmeta!(Indicomb, :DocTestSetup, :(using Indicomb); recursive=true)
makedocs(;
modules=[Indicomb],
authors="Jerry Ling <[email protected]> and contributors",
repo="https://github.com/Moelf/Indicomb.jl/blob/{commit}{path}#{line}",
sitename="Indicomb.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://Moelf.github.io/Indicomb.jl",
assets=String[],
),
pages=[
"Home" => "index.md",
],
)
deploydocs(;
repo="github.com/Moelf/Indicomb.jl",
)
| Indicomb | https://github.com/Moelf/Indicomb.jl.git |
|
[
"MIT"
] | 0.1.0 | cb40ca3d7a82c1b5e767e9f099b4a85083a73f69 | code | 3694 | module Indicomb
export get_events_catnum_name
using JSON3, HTTP, Dates, SHA
import DataStructures: OrderedDict
import HTTP: escapeuri
import Base.Threads: @threads
_format_dt(dt) = Dates.format(dt, dateformat"y-m-d")
"""
get_events_catnum_name(baseurl, cat_num, evt_name; params...)
Main API for basic users: given a base URL (Indico server domain), a category number, and an event title filter,
return a list of (`detail=subcontributions`) event JSON objects.
# Example
```
julia> get_events_catnum_name("https://indico.cern.ch", 1X3X, "XXXXX"; from="2021-06-10", to="2021-06-30", apikey=".....", secretkey="....");
JSON3.Object{Vector{UInt8}, SubArray{UInt64, 1, Vector{UInt64}, Tuple{UnitRange{Int64}}, true}} with 29 entries:
:_type => "Conference"
:id => "10521XX"
:title => "XXXXX group meeting"
:description => ""
:startDate => {…
:timezone => "Europe/Zurich"
:endDate => {…
:room => ""
:location => ""
:address => ""
...
```
"""
function get_events_catnum_name(
baseurl,
cat_num,
evt_name;
params...,
)
# get category JSON, which lists events in simplified form
cat = get_indico_page(baseurl, "/export/categ/$cat_num.json"; params...)
simple_evts = filter_events(evt_name, cat)
res = Vector{JSON3.Object}(undef, length(simple_evts))
# add event-specific query parameters to the original `params` to get full info
detail_params = (pretty="yes", detail="subcontributions", params...)
@threads for idx in eachindex(simple_evts)
id = simple_evts[idx][:id]
evt = get_indico_page(baseurl, "/export/event/$id.json"; detail_params...)
# Indico always returns bloated/nested JSON; unpack it here
res[idx] = only(evt[:results]) # `only` because we're querying a single event
end
return res
end
"""
indico_request(path; params...)
Build a `path?params` URL string (without the base domain). The key feature is constructing the `signature=` query parameter
based on the SHA1 HMAC algorithm. The `signature` only depends on the path and query, not the base-site domain.
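# Example
Without a `secretkey`, the parameters are simply escaped and appended (this mirrors the check in `test/runtests.jl`):
```
julia> Indicomb.indico_request("cern.ch/"; p = 3)
"cern.ch/?p=3"
```
With a `secretkey`, a sorted `timestamp` and `signature` are appended, so the result changes between calls.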
"""
function indico_request(path; params...)
d = OrderedDict{Symbol,Any}(params) # don't care about performance
if :secretkey ∈ keys(d)
# key is `secret_key` and data is entire url except signature=
_now = Dates.value(now(UTC)) - Dates.UNIXEPOCH
d[:timestamp] = _now ÷ 1000# ms -> s
sort!(d)
temp_url = "$path?$(escapeuri(d))"
sig = bytes2hex(sha1_hmac(d[:secretkey], temp_url))
d[:signature] = sig
end
return "$path?$(escapeuri(d))"
end
function get_indico_page(baseurl, path; params...)
_u = "$baseurl$(indico_request(path; params...))"
return JSON3.read(HTTP.get(_u).body)
end
function filter_events(f::Function, category)
return filter(f, category[:results])
end
function filter_events(title_name::Union{AbstractString,Regex}, category)
return filter_events(x -> contains(x[:title], title_name), category)
end
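# e.g. filter_events(r"group meeting", cat) keeps only events whose :title contains the pattern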
# perform a one-time update and then return the digest
"""
sha1_hmac(key, data)
Return the HMAC-SHA1 digest of `data` keyed by `key` (a single update cycle).
# Example
```
julia> bytes2hex(Indicomb.sha1_hmac("123", "Julia"))
"eca18118a715a32ab5f340f57d917c1e2eec96f5"
```
!!! note
Indico uses the *sorted* query parameters (prefixed by the request path) as `data`, i.e.
given `"https://cern.ch/export/categ/1135.json?from=2020&apikey=8"`, the query part of `data` will be
`"apikey=8&from=2020"` (the string comes from `HTTP.escapeuri`).
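For illustration, the sorted query string can be reproduced with the same helpers this package uses (a sketch, not part of the API):
```
julia> import DataStructures: OrderedDict; import HTTP: escapeuri
julia> escapeuri(sort!(OrderedDict(:from => 2020, :apikey => 8)))
"apikey=8&from=2020"
```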
"""
function sha1_hmac(key, data)
h = HMAC_CTX(SHA1_CTX(), Vector{UInt8}(key))
update!(h, Vector{UInt8}(data))
return digest!(h)
end
end
| Indicomb | https://github.com/Moelf/Indicomb.jl.git |
|
[
"MIT"
] | 0.1.0 | cb40ca3d7a82c1b5e767e9f099b4a85083a73f69 | code | 833 | using Indicomb
using Test, JSON3
@testset "Indico main function" begin
t = get_events_catnum_name("https://indico.cern.ch", 6725, r"."; from="2021-06-01", to="2021-06-15")
@test length(t) == 14
t = get_events_catnum_name("https://indico.cern.ch", 6725, "Edition"; from="2021-06-01", to="2021-06-15")
@test length(t) == 1
@test contains(t[1][:title], "Edition of the Large")
end
@testset "Indico utility functions" begin
re = Indicomb.indico_request("cern.ch/"; p = 3)
@test re == "cern.ch/?p=3"
t = Indicomb.get_indico_page(
"https://indico.cern.ch",
"/export/categ/6725.json";
from="2021-06-01",
to="2021-06-15",
)
@test t[:count] == 14
@test t[:results][1][:_type] == "Conference"
@test contains(t[:results][1][:title], "Edition of the Large")
end
| Indicomb | https://github.com/Moelf/Indicomb.jl.git |
|
[
"MIT"
] | 0.1.0 | cb40ca3d7a82c1b5e767e9f099b4a85083a73f69 | docs | 2213 | # Indicomb
[](https://Moelf.github.io/Indicomb.jl/stable)
[](https://Moelf.github.io/Indicomb.jl/dev)
[](https://github.com/Moelf/Indicomb.jl/actions)
# Quick Start
At the top level, this package exports a single function `get_events_catnum_name`, which can be used to get (detailed) Indico events within
a category number, filtered by the events' `:title`:
```
julia> t = get_events_catnum_name("https://indico.cern.ch", 1X3X, "XXXXX"; from="2021-06-10", to="2021-06-30", apikey=".....", secretkey="....");
julia> t[1]
JSON3.Object{Vector{UInt8}, SubArray{UInt64, 1, Vector{UInt64}, Tuple{UnitRange{Int64}}, true}} with 29 entries:
:_type => "Conference"
:id => "10521XX"
:title => "XXXXX group meeting"
:description => ""
:startDate => {…
:timezone => "Europe/Zurich"
:endDate => {…
:room => ""
:location => ""
:address => ""
:type => "meeting"
:references => Union{}[]
:_fossil => "conferenceMetadataWithSubContribs"
:categoryId => 1X3X
:category => "Harvard University"
:note => {…
:roomFullname => ""
:url => "https://indico.cern.ch/event/10521XX/"
:creationDate => {…
:creator => {…
:hasAnyProtection => true
:roomMapURL => nothing
:folders => JSON3.Object[{…
:chairs => Union{}[]
:material => JSON3.Object[{…
:keywords => Union{}[]
:visibility => {…
:allowed => {…
:contributions => JSON3.Object[{…
```
There are lower-level functions (utilities, mostly) in case you want to hack around. Feel free to open a feature request for quality-of-life improvements.
```
julia> Indicomb.get_indico_page("https://indico.cern.ch", "/export/categ/1135.json"; apikey=".....", secretkey="xxxxxx", from="2021-06-01", to="2030-06-01")
JSON3.Object{…} with entries:
  :count   => …
  :results => …
```
# TODO
- [ ] Ship a HTML page generation script and a CSS
| Indicomb | https://github.com/Moelf/Indicomb.jl.git |
|
[
"MIT"
] | 0.1.0 | cb40ca3d7a82c1b5e767e9f099b4a85083a73f69 | docs | 178 | ```@meta
CurrentModule = Indicomb
```
# Indicomb
Documentation for [Indicomb](https://github.com/Moelf/Indicomb.jl).
```@index
```
```@autodocs
Modules = [Indicomb]
```
| Indicomb | https://github.com/Moelf/Indicomb.jl.git |
|
[
"MIT"
] | 0.1.5 | 45fefd4a5a6e80231e99f8dbb6bb1447dfd358b5 | code | 854 | module OutlierDetectionNeighbors
using OutlierDetectionInterface
using OutlierDetectionInterface:SCORE_UNSUPERVISED
const OD = OutlierDetectionInterface
import NearestNeighbors
const NN = NearestNeighbors
import Distances
const DI = Distances
include("utils.jl")
include("models/abod.jl")
include("models/cof.jl")
include("models/dnn.jl")
include("models/knn.jl")
include("models/lof.jl")
const UUID = "51249a0a-cb36-4849-8e04-30c7f8d311bb"
const MODELS = [:ABODDetector,
:COFDetector,
:DNNDetector,
:KNNDetector,
:LOFDetector]
for model in MODELS
@eval begin
OD.@default_frontend $model
OD.@default_metadata $model $UUID
export $model
end
end
end
| OutlierDetectionNeighbors | https://github.com/OutlierDetectionJL/OutlierDetectionNeighbors.jl.git |
|
[
"MIT"
] | 0.1.5 | 45fefd4a5a6e80231e99f8dbb6bb1447dfd358b5 | code | 4756 | const K_PARAM = """ k::Integer
Number of neighbors (must be greater than 0)."""
const KNN_PARAMS = """ metric::Metric
This is one of the Metric types defined in the Distances.jl package. It is possible to define your own metrics by
creating new types that are subtypes of Metric.
algorithm::Symbol
One of `(:kdtree, :balltree, :brutetree)`. In a `kdtree`, points are recursively split into groups using hyper-planes.
Therefore a KDTree only works with axis-aligned metrics, which are: Euclidean, Chebyshev, Minkowski and Cityblock.
A `brutetree` linearly searches all points in a brute-force fashion and works with any Metric. A `balltree`
recursively splits points into groups bounded by hyper-spheres and works with any Metric.
static::Union{Bool, Symbol}
One of `(true, false, :auto)`. Whether the input data for fitting and transform should be statically or dynamically
allocated. If `true`, the data is statically allocated. If `false`, the data is dynamically allocated. If `:auto`,
the data is dynamically allocated if the product of all dimensions except the last is greater than 100.
leafsize::Int
Determines at what number of points to stop splitting the tree further. There is a trade-off between traversing the
tree and having to evaluate the metric function for increasing number of points.
reorder::Bool
While building the tree this will put points close in distance close in memory since this helps with cache locality.
In this case, a copy of the original data will be made so that the original data is left unmodified. This can have a
significant impact on performance and is by default set to true.
parallel::Bool
Parallelize `score` and `predict` using all threads available. The number of threads can be set with the
`JULIA_NUM_THREADS` environment variable. Note: `fit` is not parallel."""
# for some reason providing this as a function is massively slowing down compilation
macro tree(detector, X)
esc(quote
if $detector.algorithm === :kdtree
NN.KDTree($X, $detector.metric; $detector.leafsize, $detector.reorder)
elseif $detector.algorithm === :balltree
NN.BallTree($X, $detector.metric; $detector.leafsize, $detector.reorder)
elseif $detector.algorithm === :brutetree
# intentionally not propagating leafsize and reorder, because that may lead to problems
NN.BruteTree($X, $detector.metric)
end
end)
end
function knn_parallel(tree::NN.NNTree, X::AbstractVector{<:AbstractVector},
k::Int, ignore_self = false, sort::Bool = false)
# pre-allocate the result arrays (as in NearestNeighbors.jl)
indices = eachindex(X)
dists = [Vector{NN.get_T(eltype(X))}(undef, k) for _ in indices]
idxs = [Vector{Int}(undef, k) for _ in indices]
# get number of threads
nThreads = Threads.nthreads()
# partition the input array equally
n_samples = length(X)
divides_data = mod(n_samples, nThreads) == 0
partition_size = divides_data ? n_samples ÷ nThreads : n_samples ÷ nThreads + 1
partitions = Iterators.partition(indices, partition_size)
# create the knn function depending on whether we need to ignore self
knn_closure(tree, X, k, sort) = ignore_self ? _knn_others(tree, X, k) : NN.knn(tree, X, k, sort)
# parallel computation over the equal array splits
Threads.@threads for idx = collect(partitions)
@inbounds idxs[idx], dists[idx] = knn_closure(tree, X[idx], k, sort)
end
idxs, dists
end
function knn_sequential(tree::NN.NNTree, X::AbstractVector{<:AbstractVector},
k::Int, ignore_self = false, sort::Bool = false)
ignore_self ? _knn_others(tree, X, k) : NN.knn(tree, X, k, sort)
end
# Calculate the k-nearest neighbors with ignoring the own point in the tree.
function _knn_others(tree::NN.NNTree, X::AbstractVector, k::Integer)
idxs, dists = NN.knn(tree, X, k + 1, true) # we ignore the distance to the 'self' point, important to sort!
ignore_self = vecvec -> map(vec -> vec[2:end], vecvec)
ignore_self(idxs), ignore_self(dists)
end
# The NN package automatically converts matrices to a vector of points (static vectors) for improved performance
# this results in very bad performance for high-dimensional matrices (e.g. d > 100).
dynamic_view(X::Data) = [NN.SizedVector{length(v)}(v) for v in eachslice(X; dims = ndims(X))]
static_view(X::Data) = [NN.SVector{length(v)}(v) for v in eachslice(X; dims = ndims(X))]
auto_view(X::Data) = prod(size(X)[1:end-1]) > 100 ? dynamic_view(X) : static_view(X)
function prepare_data(X::Data, static::Union{Bool,Symbol})
@assert ndims(X) == 2 "k-NN currently only supports matrices."
return static === :auto ? auto_view(X) :
static ? static_view(X) : dynamic_view(X)
end
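# A minimal usage sketch (hypothetical 3×200 matrix; columns are the observations):
#   X = rand(3, 200)
#   prepare_data(X, true)   # Vector of static SVector{3}
#   prepare_data(X, :auto)  # also static here, since prod(size(X)[1:end-1]) == 3 <= 100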
| OutlierDetectionNeighbors | https://github.com/OutlierDetectionJL/OutlierDetectionNeighbors.jl.git |
|
[
"MIT"
] | 0.1.5 | 45fefd4a5a6e80231e99f8dbb6bb1447dfd358b5 | code | 5250 | using Combinatorics: combinations
using LinearAlgebra: dot, norm
using Statistics: var
"""
ABODDetector(k = 5,
metric = Euclidean(),
algorithm = :kdtree,
static = :auto,
leafsize = 10,
reorder = true,
parallel = false,
enhanced = false)
Determine outliers based on the angles to an instance's nearest neighbors. This implements the `FastABOD` variant described in
the paper; that is, it uses the variance of angles to the nearest neighbors, not to the whole dataset, see [1].
*Notice:* The scores are inverted, to conform to our notion that higher scores describe higher outlierness.
Parameters
----------
$K_PARAM
$KNN_PARAMS
enhanced::Bool
When `enhanced=true`, it uses the enhanced ABOD (EABOD) adaptation proposed by [2].
Examples
--------
$(SCORE_UNSUPERVISED("ABODDetector"))
References
----------
[1] Kriegel, Hans-Peter; Schubert, Matthias; Zimek, Arthur (2008): Angle-based outlier detection in high-dimensional
data.
[2] Li, Xiaojie; Lv, Jian Cheng; Cheng, Dongdong (2015): Angle-Based Outlier Detection Algorithm with More Stable
Relationships.
"""
OD.@detector mutable struct ABODDetector <: UnsupervisedDetector
# Note: the minimum k is 3. The 2 nearest neighbors yields one angle, which implies zero variance everywhere.
k::Integer = 5::(_ > 2)
metric::DI.Metric = DI.Euclidean()
algorithm::Symbol = :kdtree::(_ in (:kdtree, :balltree, :brutetree))
static::Union{Bool,Symbol} = :auto::(_ in (true, false, :auto))
leafsize::Integer = 10::(_ ≥ 0)
reorder::Bool = true
parallel::Bool = false
enhanced::Bool = false
end
struct ABODModel <: DetectorModel
# We have to store the tree to efficiently retrieve the indices to the nearest neighbors. Additionally, we have to
# store the raw training data `X` for later angle calculations.
X::AbstractArray
tree::NN.NNTree
end
function OD.fit(detector::ABODDetector, X::Data; verbosity)::Fit
Xprep = prepare_data(X, detector.static)
# create the specified tree
tree = @tree detector Xprep
# use tree to calculate distances
idxs, _ = detector.parallel ?
knn_parallel(tree, Xprep, detector.k, true) :
knn_sequential(tree, Xprep, detector.k, true)
scores = detector.enhanced ? _eabod(X, X, idxs, detector.k) : _abod(X, X, idxs, detector.k)
return ABODModel(X, tree), scores
end
function OD.transform(detector::ABODDetector, model::ABODModel, X::Data)::Scores
Xprep = prepare_data(X, detector.static)
idxs, _ = detector.parallel ?
knn_parallel(model.tree, Xprep, detector.k) :
knn_sequential(model.tree, Xprep, detector.k)
return detector.enhanced ? _eabod(X, model.X, idxs, detector.k) : _abod(X, model.X, idxs, detector.k)
end
function _abod(X::AbstractArray, Xtrain::AbstractArray, idxs::AbstractVector, k::Int)::Scores
# Calculate the ABOF for all instances in X.
scores = Vector{Float64}(undef, length(idxs))
for i in eachindex(idxs)
@inbounds scores[i] = _abof(X[:, i], idxs[i], Xtrain, k)
end
scores
end
function _abof(p::AbstractVector, idxs::AbstractVector, X::AbstractArray, k::Int)::Real
# Calculate the angle-based outlier factor (ABOF). The ABOF is the variance over the angles between the difference
# vectors of a point `p` to all pairs of points in its nearest neighbors weighted by the distance of the points.
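# In formula form (as computed below): ABOF(p) = Var_{a,b ∈ N_k(p)} ⟨p-a, p-b⟩ / (‖p-a‖² ‖p-b‖²); the returned score is -ABOF(p)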
# all two-neighbor combinations
combs = combinations(idxs, 2)
# we know that there are binomial(k, 2) results for all two-neighbor combinations and can thus pre-allocate
result = Vector{Float64}(undef, binomial(k, 2))
for (i, (idx1, idx2)) in enumerate(combs)
neighbor1, neighbor2 = p .- X[:, idx1], p .- X[:, idx2]
@inbounds result[i] = dot(neighbor1, neighbor2) / (norm(neighbor1)^2 * norm(neighbor2)^2)
end
# NaN means that at least one norm was zero, we use -1, because higher scores should describe outlierness
-1 * var(Iterators.filter(!isnan, result))
end
function _eabod(X::AbstractArray, Xtrain::AbstractArray, idxs::AbstractVector, k::Int)::Scores
# Calculate the EABOF for all instances in X.
scores = Vector{Float64}(undef, length(idxs))
for i in eachindex(idxs)
@inbounds scores[i] = _eabof(X[:, i], idxs[i], Xtrain, k)
end
scores
end
function _eabof(p::AbstractVector, idxs::AbstractVector, X::AbstractArray, k::Int)::Real
# Calculate the enhanced angle-based outlier factor (EABOF).
# all two-neighbor combinations
combs = combinations(idxs, 2)
# we know that there are binomial(k, 2) results for all two-neighbor combinations and can thus pre-allocate
result = Vector{Float64}(undef, binomial(k, 2))
for (i, (idx1, idx2)) in enumerate(combs)
neighbor1, neighbor2 = p .- X[:, idx1], p .- X[:, idx2]
norm1, norm2 = norm(neighbor1), norm(neighbor2)
@inbounds result[i] = (1 / (norm1 + norm2)) * (dot(neighbor1, neighbor2) / (norm1^2 * norm2^2))
end
# NaN means that at least one norm was zero, we use -1, because higher scores should describe outlierness
-1 * var(Iterators.filter(!isnan, result))
end
| OutlierDetectionNeighbors | https://github.com/OutlierDetectionJL/OutlierDetectionNeighbors.jl.git |
|
[
"MIT"
] | 0.1.5 | 45fefd4a5a6e80231e99f8dbb6bb1447dfd358b5 | code | 4347 | """
COFDetector(k = 5,
metric = Euclidean(),
algorithm = :kdtree,
leafsize = 10,
reorder = true,
parallel = false)
Local outlier density based on chaining distance between graphs of neighbors, as described in [1].
Parameters
----------
$K_PARAM
$KNN_PARAMS
Examples
--------
$(SCORE_UNSUPERVISED("COFDetector"))
References
----------
[1] Tang, Jian; Chen, Zhixiang; Fu, Ada Wai-Chee; Cheung, David Wai-Lok (2002): Enhancing Effectiveness of Outlier
Detections for Low Density Patterns.
"""
OD.@detector mutable struct COFDetector <: UnsupervisedDetector
k::Integer = 5::(_ > 0)
metric::DI.Metric = DI.Euclidean()
algorithm::Symbol = :kdtree::(_ in (:kdtree, :balltree, :brutetree))
static::Union{Bool,Symbol} = :auto::(_ in (true, false, :auto))
leafsize::Integer = 10::(_ ≥ 0)
reorder::Bool = true
parallel::Bool = false
end
struct COFModel <: DetectorModel
# An efficient COF prediction requires us to store the full pairwise distance matrix of the training examples in
# addition to the learned tree as well as the ACDs of the training examples.
tree::NN.NNTree
pdists::AbstractArray
acds::AbstractVector
end
function OD.fit(detector::COFDetector, X::Data; verbosity)::Fit
# calculate pairwise distances in addition to building the tree;
# TODO: we could remove this once NearestNeighbors.jl exports something like `allpairs`
pdists = DI.pairwise(detector.metric, X, dims = 2)
# Note: Fitting is different from pyOD, because we ignore the trivial nearest neighbor using knn_others as in
# all other nearest-neighbor-based algorithms
X = prepare_data(X, detector.static)
# use tree to calculate distances
tree = @tree detector X
# we need k + 1 neighbors to calculate the chaining distance and have to make sure the indices are sorted
idxs, _ = detector.parallel ?
knn_parallel(tree, X, detector.k + 1, true) :
knn_sequential(tree, X, detector.k + 1, true)
acds = _acd(idxs, pdists, detector.k)
scores = _cof(idxs, acds, detector.k)
return COFModel(tree, pdists, acds), scores
end
function OD.transform(detector::COFDetector, model::COFModel, X::Data)::Scores
X = prepare_data(X, detector.static)
# Note: It's important to sort the neighbors, because _calc_acds depends on the order of the neighbors
idxs, _ = detector.parallel ?
knn_parallel(model.tree, X, detector.k + 1, false, true) :
knn_sequential(model.tree, X, detector.k + 1, false, true)
return _cof(idxs, model.pdists, model.acds, detector.k)
end
function _cof(idxs::AbstractVector{<:AbstractVector}, acds::AbstractVector, k::Int)::Scores
# Calculate the connectivity-based outlier factor from given acds
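# Standard form: COF_k(p) = k * ac-dist(p) / Σ_{o ∈ N_k(p)} ac-dist(o)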
cof = Vector{Float64}(undef, length(idxs))
for (i, idx) in enumerate(idxs)
@inbounds cof[i] = (acds[i] * k) / sum(acds[idx[2:end]])
end
cof
end
function _cof(idxs::AbstractVector{<:AbstractVector}, pdists::AbstractMatrix, acds::AbstractVector, k::Int)::Scores
# Calculate the connectivity-based outlier factor for test examples with given training distances and acds.
cof = Vector{Float64}(undef, length(idxs))
acdsTest = _acd(idxs, pdists, k)
for (i, idx) in enumerate(idxs)
@inbounds cof[i] = (acdsTest[i] * k) / sum(acdsTest[idx[2:end]])
end
cof
end
function _acd(idxs::AbstractVector{<:AbstractVector}, pdists::AbstractMatrix, k::Int)::Vector{Float64}
# Initialize with zeros because we add to each entry
acds = zeros(Float64, length(idxs))
kplus1 = k + 1
for (i, idx) in enumerate(idxs)
for j in 1:k
# calculate the minimum distance (from all reachable points). That is, we sort the distances of a specific
# point (given by idx[j+1]) according to the order of the current idx, where idx[1] specifies the idx of the
# nearest neighbors and idx[k] specifies the idx of the k-th neighbor. We then restrict this so-called
# set-based nearest path (SBN) to the points that are reachable with [begin:j]
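# In formula form (as accumulated below): ac-dist(p) = Σ_{j=1..k} [2(k+1-j) / (k(k+1))] * cost_j, where cost_j is the j-th SBN edge cost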
cost = minimum(pdists[idx, idx[j+1]][begin:j])
@inbounds acds[i] += ((2.0 * (kplus1 - j)) / (k * kplus1)) * cost
end
end
acds
end
| OutlierDetectionNeighbors | https://github.com/OutlierDetectionJL/OutlierDetectionNeighbors.jl.git |
|
[
"MIT"
] | 0.1.5 | 45fefd4a5a6e80231e99f8dbb6bb1447dfd358b5 | code | 3306 | """
DNNDetector(d = 0,
metric = Euclidean(),
algorithm = :kdtree,
leafsize = 10,
reorder = true,
parallel = false)
Anomaly score based on the number of neighbors in a hypersphere of radius `d`. Knorr et al. [1] directly converted the
resulting outlier scores to labels, thus this implementation does not fully reflect the approach from the paper.
Parameters
----------
d::Real
The hypersphere radius used to calculate the global density of an instance.
$KNN_PARAMS
Examples
--------
$(SCORE_UNSUPERVISED("DNNDetector"))
References
----------
[1] Knorr, Edwin M.; Ng, Raymond T. (1998): Algorithms for Mining Distance-Based Outliers in Large Datasets.
"""
OD.@detector mutable struct DNNDetector <: UnsupervisedDetector
metric::DI.Metric = DI.Euclidean()
algorithm::Symbol = :kdtree::(_ in (:kdtree, :balltree, :brutetree))
static::Union{Bool,Symbol} = :auto::(_ in (true, false, :auto))
leafsize::Integer = 10::(_ ≥ 0)
reorder::Bool = true
parallel::Bool = false
d::Real = 0::(_ > 0) # warns if `d` is not set
end
struct DNNModel <: DetectorModel
tree::NN.NNTree
end
function OD.fit(detector::DNNDetector, X::Data; verbosity)::Fit
X = prepare_data(X, detector.static)
# create the specified tree
tree = @tree detector X
# use tree to calculate distances
scores = detector.parallel ?
dnn_parallel(tree, X, detector.d, true) :
dnn_sequential(tree, X, detector.d, true)
return DNNModel(tree), scores
end
function OD.transform(detector::DNNDetector, model::DNNModel, X::Data)::Scores
X = prepare_data(X, detector.static)
return detector.parallel ?
dnn_parallel(model.tree, X, detector.d) :
dnn_sequential(model.tree, X, detector.d)
end
@inline function _dnn(idxs::AbstractVector{<:AbstractVector})::Scores
# Helper function to reduce the instances to a global density score.
1 ./ (length.(idxs) .+ 0.1) # min score = 0, max_score = 10
end
@inline function _dnn_others(idxs::AbstractVector{<:AbstractVector})::Scores
# Remove the (self) point previously added when fitting the tree, otherwise during `fit`, that point would always
# be included in the density estimation
1 ./ (length.(idxs) .- 0.9) # 1 - 0.1
end
function dnn_sequential(tree::NN.NNTree, X::AbstractVector{<:AbstractVector}, d::Real, ignore_self::Bool = false)
# returns a vector of vectors containing the nearest indices
idxs = NN.inrange(tree, X, d, false)
ignore_self ? _dnn_others(idxs) : _dnn(idxs)
end
function dnn_parallel(tree::NN.NNTree, X::AbstractVector{<:AbstractVector}, d::Real, ignore_self::Bool = false)
# returns a vector of vectors containing the nearest indices
samples = length(X)
scores = Vector{Float64}(undef, samples)
# get number of threads
nThreads = Threads.nthreads()
# partition the input array equally
partition_size = samples ÷ nThreads + 1
partitions = Iterators.partition(eachindex(X), partition_size)
dnn_closure(idxs) = ignore_self ? _dnn_others(idxs) : _dnn(idxs)
Threads.@threads for idx = collect(partitions)
@inbounds scores[idx] = dnn_closure(NN.inrange(tree, X[idx], d, false))
end
return scores
end
| OutlierDetectionNeighbors | https://github.com/OutlierDetectionJL/OutlierDetectionNeighbors.jl.git |
|
[
"MIT"
] | 0.1.5 | 45fefd4a5a6e80231e99f8dbb6bb1447dfd358b5 | code | 2532 | using Statistics: median
"""
KNNDetector(k=5,
metric=Euclidean,
algorithm=:kdtree,
leafsize=10,
reorder=true,
reduction=:maximum)
Calculate the anomaly score of an instance based on the distance to its k-nearest neighbors.
Parameters
----------
$K_PARAM
$KNN_PARAMS
reduction::Symbol
One of `(:maximum, :median, :mean)`. (`reduction=:maximum`) was proposed by [1]. Angiulli et al. [2] proposed sum to
reduce the distances, but mean has been implemented for numerical stability.
Examples
--------
$(SCORE_UNSUPERVISED("KNNDetector"))
References
----------
[1] Ramaswamy, Sridhar; Rastogi, Rajeev; Shim, Kyuseok (2000): Efficient Algorithms for Mining Outliers from Large Data
Sets.
[2] Angiulli, Fabrizio; Pizzuti, Clara (2002): Fast Outlier Detection in High Dimensional Spaces.
"""
OD.@detector mutable struct KNNDetector <: UnsupervisedDetector
k::Integer = 5::(_ > 0)
metric::DI.Metric = DI.Euclidean()
algorithm::Symbol = :kdtree::(_ in (:kdtree, :balltree, :brutetree))
static::Union{Bool,Symbol} = :auto::(_ in (true, false, :auto))
leafsize::Integer = 10::(_ ≥ 0)
reorder::Bool = true
parallel::Bool = false
reduction::Symbol = :maximum::(_ in (:maximum, :median, :mean))
end
struct KNNModel <: DetectorModel
tree::NN.NNTree
end
function OD.fit(detector::KNNDetector, X::Data; verbosity)::Fit
X = prepare_data(X, detector.static)
# create the specified tree
tree = @tree detector X
# use tree to calculate distances
_, dists = detector.parallel ?
knn_parallel(tree, X, detector.k, true) :
knn_sequential(tree, X, detector.k, true)
# reduce distances to outlier score
scores = _knn(dists, detector.reduction)
return KNNModel(tree), scores
end
function OD.transform(detector::KNNDetector, model::KNNModel, X::Data)::Scores
X = prepare_data(X, detector.static)
_, dists = detector.parallel ?
knn_parallel(model.tree, X, detector.k) :
knn_sequential(model.tree, X, detector.k)
return _knn(dists, detector.reduction)
end
@inline function _knn(distances::AbstractVector{<:AbstractVector}, reduction::Symbol)::Scores
# Helper function to reduce `k` distances to a single distance.
if reduction == :maximum
return maximum.(distances)
elseif reduction == :median
return median.(distances)
elseif reduction == :mean
return mean.(distances)
end
end
| OutlierDetectionNeighbors | https://github.com/OutlierDetectionJL/OutlierDetectionNeighbors.jl.git |
|
[
"MIT"
] | 0.1.5 | 45fefd4a5a6e80231e99f8dbb6bb1447dfd358b5 | code | 3569 | using Statistics: mean
"""
LOFDetector(k = 5,
metric = Euclidean(),
algorithm = :kdtree,
leafsize = 10,
reorder = true,
parallel = false)
Calculate an anomaly score based on the density of an instance in comparison to its neighbors. This algorithm introduced
the notion of local outliers and was developed by Breunig et al., see [1].
Parameters
----------
$K_PARAM
$KNN_PARAMS
Examples
--------
$(SCORE_UNSUPERVISED("LOFDetector"))
References
----------
[1] Breunig, Markus M.; Kriegel, Hans-Peter; Ng, Raymond T.; Sander, Jörg (2000): LOF: Identifying Density-Based Local
Outliers.
"""
OD.@detector mutable struct LOFDetector <: UnsupervisedDetector
k::Integer = 5::(_ > 0)
metric::DI.Metric = DI.Euclidean()
algorithm::Symbol = :kdtree::(_ in (:kdtree, :balltree, :brutetree))
static::Union{Bool,Symbol} = :auto::(_ in (true, false, :auto))
leafsize::Integer = 10::(_ ≥ 0)
reorder::Bool = true
parallel::Bool = false
end
struct LOFModel <: DetectorModel
# For efficient prediction, we need to store the learned tree, the distances of each training sample to its
# k-nearest neighbors, as well as the training lrds.
tree::NN.NNTree
ndists::AbstractMatrix
lrds::AbstractVector
end
function OD.fit(detector::LOFDetector, X::Data; verbosity)::Fit
X = prepare_data(X, detector.static)
# create the specified tree
tree = @tree detector X
# use tree to calculate distances
idxs, dists = detector.parallel ?
knn_parallel(tree, X, detector.k, true) :
knn_sequential(tree, X, detector.k, true)
# transform dists (vec of vec) to matrix to allow faster indexing later
ndists = reduce(hcat, dists)
# pre calculate lrds for later prediction use
lrds = _calculate_lrd(idxs, dists, ndists, detector.k)
# reduce distances to outlier score
scores = _lof_from_lrd(idxs, lrds)
return LOFModel(tree, ndists, lrds), scores
end
function OD.transform(detector::LOFDetector, model::LOFModel, X::Data)::Scores
X = prepare_data(X, detector.static)
idxs, dists = detector.parallel ?
knn_parallel(model.tree, X, detector.k, false, true) :
knn_sequential(model.tree, X, detector.k, false, true)
return _lof(idxs, dists, model.ndists, model.lrds, detector.k)
end
function _lof(idxs::AbstractVector, dists::AbstractVector, model_dists::AbstractArray,
model_lrds::AbstractVector, k::Int)::Scores
lrds = _calculate_lrd(idxs, dists, model_dists, k)
# calculate the local outlier factor from the lrds
map((idx, lrd) -> mean(model_lrds[idx]) / lrd, idxs, lrds)
end
function _lof_from_lrd(idxs::AbstractVector, lrds::AbstractVector)::Scores
# Directly calculate the local outlier factor from idxs with corresponding lrds.
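# In formula form: LOF_k(p) = mean_{o ∈ N_k(p)} lrd_k(o) / lrd_k(p)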
map((idx, lrd) -> mean(lrds[idx]) / lrd, idxs, lrds)
end
function _calculate_lrd(idxs::AbstractVector, dists::AbstractVector, ndists::AbstractArray, k::Int)::AbstractVector
# The LRD of a sample is the inverse of the average reachability distance of its k-nearest neighbors. Epsilon is
# added in case that there are more than k duplicates.
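# In formula form (up to the epsilon): lrd_k(p) = 1 / mean_{o ∈ N_k(p)} max(k-distance(o), d(p, o))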
map((is, ds) -> 1 / (mean(_max!(ndists[k, is], ds)) + 1e-10), idxs, dists)
end
function _max!(ar1::AbstractVector, ar2::AbstractVector)::AbstractVector
# Calculate the element wise maximum between two vectors.
for i in eachindex(ar1)
@inbounds ar1[i] = ar1[i] > ar2[i] ? ar1[i] : ar2[i]
end
ar1
end
| OutlierDetectionNeighbors | https://github.com/OutlierDetectionJL/OutlierDetectionNeighbors.jl.git |
|
[
"MIT"
] | 0.1.5 | 45fefd4a5a6e80231e99f8dbb6bb1447dfd358b5 | code | 1034 | using OutlierDetectionNeighbors
using OutlierDetectionTest
using Distances: Cityblock
# Test the metadata of all exported detectors
test_meta.(eval.(OutlierDetectionNeighbors.MODELS))
data = TestData()
run_test(detector) = test_detector(detector, data)
# ABOD
run_test(ABODDetector())
run_test(ABODDetector(static=false))
run_test(ABODDetector(parallel=true, enhanced=true))
# COF
run_test(COFDetector())
run_test(COFDetector(metric=Cityblock()))
run_test(COFDetector(static=false))
run_test(COFDetector(parallel=true))
# DNN
run_test(DNNDetector(d=1))
run_test(DNNDetector(d=1, static=false))
run_test(DNNDetector(d=1, parallel=true))
# KNN
run_test(KNNDetector(reduction=:maximum))
run_test(KNNDetector(static=false))
run_test(KNNDetector(parallel=true, reduction=:mean, algorithm=:balltree))
run_test(KNNDetector(parallel=true, reduction=:median))
# LOF
run_test(LOFDetector())
run_test(LOFDetector(static=false))
run_test(LOFDetector(parallel=true))
run_test(LOFDetector(static=false, parallel=true, algorithm=:brutetree))
| OutlierDetectionNeighbors | https://github.com/OutlierDetectionJL/OutlierDetectionNeighbors.jl.git |
|
[
"MIT"
] | 0.1.5 | 45fefd4a5a6e80231e99f8dbb6bb1447dfd358b5 | docs | 679 | # OutlierDetectionNeighbors
[](https://OutlierDetectionJL.github.io/OutlierDetection.jl/stable)
[](https://OutlierDetectionJL.github.io/OutlierDetection.jl/dev)
[](https://github.com/OutlierDetectionJL/OutlierDetectionNeighbors.jl/actions)
[](https://codecov.io/gh/OutlierDetectionJL/OutlierDetectionNeighbors.jl)
| OutlierDetectionNeighbors | https://github.com/OutlierDetectionJL/OutlierDetectionNeighbors.jl.git |
|
[
"MIT"
] | 0.2.1 | c3964c133326d031d4da5f539355a77aae9fe6ce | code | 15846 | ### A Pluto.jl notebook ###
# v0.19.40
using Markdown
using InteractiveUtils
# This Pluto notebook uses @bind for interactivity. When running this notebook outside of Pluto, the following 'mock version' of @bind gives bound variables a default value (instead of an error).
macro bind(def, element)
quote
local iv = try Base.loaded_modules[Base.PkgId(Base.UUID("6e696c72-6542-2067-7265-42206c756150"), "AbstractPlutoDingetjes")].Bonds.initial_value catch; b -> missing; end
local el = $(esc(element))
global $(esc(def)) = Core.applicable(Base.get, el) ? Base.get(el) : iv(el)
el
end
end
# ╔═╡ dc663401-3356-4cd0-8e01-7619a0e7df42
begin
using PlutoUI
import PlutoUI:combine
import PlutoUI:ExperimentalLayout
end
# ╔═╡ 0ff62a0d-53e6-48b9-8246-3f7b89499417
using TTVFaster,Test
# ╔═╡ ebea622c-b4a2-4fdd-bb3e-2925695da863
using PyPlot, DelimitedFiles
# ╔═╡ acc61da3-1bf6-4413-a3a9-3bacfd318def
@bind reset_to_default_values Button("Reset Sliders")
# ╔═╡ d08ce186-d8e5-4515-9b46-9bb62260936c
begin
function conditions(system::String,iplanet,values::Vector{T}) where (T<:Real)
# Create sliders for given system, planet id (number or letter)
# Here, masses must be given in log
# this allows more precise selection of slider value
# MSUN= 1.98892e33; MEARTH = 5.9742e27 #cgs
names=["log_mu","per","trans0","ecosw","esinw"]
min_vals=[log(1e-8),-2*values[2],-500,-1,-1]
max_vals=[log(1e-2), 2*values[2], 1.05*values[3], 1, 1]
return combine() do Child
inputs=[[
md"""
$(names[i]): $(Child(names[i], Slider(min_vals[i]:0.01:max_vals[i],default=values[i])))
"""
for i=1:3
];[md"""
$(names[i]): $(Child(names[i], Slider(min_vals[i]:0.001:max_vals[i],default=values[i])))
"""
for i=4:5]]
md"""
###### $(system) $(iplanet)
$(inputs)
"""
end
return values
end
# Allow for input of planet_plane structure from TTVFaster
function conditions(system_plname::String,p::Planet_plane_hk{T}) where (T<:Real)
names=["mu","period","trans0","ecosw","esinw"]
min_vals=[1, 1, -200, -1, -1]
max_vals=[50, 500,5000, 1, 1]
return combine() do Child
inputs=[[
md"""
$("mu"): $(Child("mu", Slider(min_vals[1]:0.01:max_vals[1],default=p.mass_ratio)))
"""];[md"""
$("period"): $(Child("period", Slider(min_vals[2]:0.001:max_vals[2],default=p.period)))
"""];[md"""
$("trans0"): $(Child("trans0", Slider(min_vals[3]:0.001:max_vals[3],default=p.trans0)))
"""];[md"""
$("ecosw"): $(Child("ecosw", Slider(min_vals[4]:0.001:max_vals[4],default=p.ecosw)))
"""];[md"""
$("esinw"): $(Child("esinw", Slider(min_vals[4]:0.001:max_vals[4],default=p.esinw)))
""" ]]
md"""
###### $(system_plname)
$(inputs)
"""
end
return values
end
md""" Create sliders based on orbital elements of a planet
"""
end
# ╔═╡ 4702121d-b071-4dd3-a43b-00fdf9c33f70
begin
data=readdlm("../examples/trappist1efg_planets.txt",',',Float64)
# Define initial conditions (i.e. the default parameters) for sliders
# Bind sliders to parameters that we want to model, add bonds as necessary
system="Trappist-1"
# For demonstration purposes only,
# this does not include the effects from other planets in the system
p1=@bind planet1 conditions(system,"e",
[log(data[1]) ;data[2:5]])
p2=@bind planet2 conditions(system,"f",
[log(data[6]) ;data[7:10]])
p3=@bind planet3 conditions(system,"g",
[log(data[11]) ;data[12:15]])
# Bind reset button to sliders
let reset_to_default_values
p1
p2
p3
end
# Create grid layout for multiple sliders, add bonds as necessary
# Layout must be in matrix form (i.e. no empty elements)
function create_grid(bonds)
bond1,bond2,bond3=bonds[1:end]
grid=PlutoUI.ExperimentalLayout.grid([
bond1 bond2 bond3
])
return grid
end
md" Approximate values for $system planets e/f/g"
end
# ╔═╡ 3c6c85c7-299a-4d5f-8769-a236e31bfd14
create_grid([p1,p2,p3])
# ╔═╡ b16d8dd8-00c3-4bc0-8a93-bedd356dc577
param_sliders=[
planet1.log_mu planet1.per planet1.trans0 planet1.ecosw planet1.esinw
planet2.log_mu planet2.per planet2.trans0 planet2.ecosw planet2.esinw
planet3.log_mu planet3.per planet3.trans0 planet3.ecosw planet3.esinw
] # Binds parameter sliders that we want to plot
# ╔═╡ 6ec3cebb-f763-431e-be22-2f7ce3072fc0
# View individual parameters for a planet
planet1
# ╔═╡ 7cb10c9e-24eb-48b8-9b35-697fec3ec79c
begin
# from test_ttv.jl
include("test_ttv.jl")
hk1=TTVFaster.Planet_plane_hk(data[1], data[2], data[3], data[4], data[5])
hk2=TTVFaster.Planet_plane_hk(data[6], data[7], data[8], data[9], data[10])
function kepler62_test()
data=readdlm("../examples/kepler62ef_planets.txt",',',Float64)
@time ttv1,ttv2=test_ttv(5,40,20,data,WriteOutput=false,num_evals=100000);
inner=readdlm("../examples/inner_ttv.txt")
outer=readdlm("../examples/outer_ttv.txt")
inner_ref=readdlm("inner_ttv.txt.ref")
outer_ref=readdlm("outer_ttv.txt.ref")
diffs=[maximum(abs.(inner_ref .- inner)) maximum(abs.(outer_ref .- outer))]
max_diff=maximum(diffs)
return round(max_diff)
end
@testset "TTVFaster.jl" begin
@test kepler62_test() == (0.0)
# @test trappist1_test()
end
end
# ╔═╡ 525e9b6c-8255-42d7-a704-3924f0d27213
begin
# Plot the provided system parameters, given transit counts
# params must be a matrix with shape (nplanet, 5)
function plot_TTVs(ntrans,params)
# @assert(length(ntrans)==nplanet)
@assert(size(params)[1]==length(ntrans))
nplanet=size(params)[1]
# Reshape parameters into array with shape(1 x 5*nplanet)
new_params=reshape(transpose(params),1,nplanet*5)
# Get pair-wise TTVs
function decompose_ttvs(nplanet,ntrans,params)
jmax = 5
pair_ttvs = zeros(nplanet,nplanet,maximum(ntrans))
for i=1:nplanet-1,j=i+1:nplanet
param = [exp(params[(i-1)*5+1]);params[(i-1)*5+2:i*5] ;exp(params[(j-1)*5+1]); params[(j-1)*5+2:j*5]]
ttv = TTVFaster.ttv_nplanet(2,jmax,[ntrans[i];ntrans[j]],param)
pair_ttvs[i,j,1:ntrans[i]] = ttv[1,1:ntrans[i]] #planet i wrt planet j
pair_ttvs[j,i,1:ntrans[j]] = ttv[2,1:ntrans[j]] #planet j wrt planet i
end
return pair_ttvs
end
pair_ttvs=decompose_ttvs(nplanet,ntrans,new_params) .* (24 * 60)
# Assuming that there are 2 transiting planets
n1=ntrans[1];n2=ntrans[2]
t1 = collect(new_params[3] .+ new_params[2] .* range(0,stop=n1-1,length=n1))
t2 = collect(new_params[7] .+ new_params[6] .* range(0,stop=n2-1,length=n2))
p1_ttvs=sum([pair_ttvs[1,iplanet,1:n1] for iplanet=1:nplanet],dims=1)[1]
p2_ttvs=sum([pair_ttvs[2,iplanet,1:n2] for iplanet=1:nplanet],dims=1)[1]
# Plot TTVs
fig=plt.figure(figsize=(6,4))
ax=fig.add_subplot(221)
ax3=fig.add_subplot(222,sharey=ax,sharex=ax)
ax3.plot(t1,transpose(p1_ttvs),color="grey")
ax3.set_title("Total TTVs")
ax2=fig.add_subplot(223)
ax4=fig.add_subplot(224,sharey=ax2,sharex=ax2)
ax4.plot(t2,transpose(p2_ttvs),color="grey")
for iplanet=1:nplanet
ax2.plot(t2,pair_ttvs[2,iplanet,1:n2],label=string("planet ",iplanet))
ax.plot(t1,pair_ttvs[1,iplanet,1:n1])#,label=string("planet",iplanet))
end
ax2.set_ylabel("TTVs [min]")
ax.set_ylabel("TTVs [min]")
ax2.set_xlabel(string("Time [JD ",L"$- t_{0,1}$","]"))
ax4.set_xlabel(string("Time [JD ",L"$- t_{0,1}$","]"))
ax.minorticks_on() ; ax2.minorticks_on() ; plt.tight_layout()
fig.legend(loc="upper left",title="Source of Perturbations",fontsize="small",ncol=nplanet)
return fig
end
end
# ╔═╡ 4db7da85-ae23-4a34-bb69-5aab907d7145
plot_TTVs([40,20,2],param_sliders)
# ╔═╡ e00727ea-f5fb-49bc-83c2-21fb50dbe7e1
# using HypertextLiteral:@htl
# ╔═╡ 00000000-0000-0000-0000-000000000001
PLUTO_PROJECT_TOML_CONTENTS = """
[deps]
DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab"
PlutoUI = "7f904dfe-b85e-4ff6-b463-dae2292396a8"
PyPlot = "d330b81b-6aea-500a-939a-2ce795aea3ee"
TTVFaster = "d84f081e-b698-44a3-a477-911041168508"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[compat]
PlutoUI = "~0.7.59"
PyPlot = "~2.11.5"
TTVFaster = "~0.2.0"
"""
# ╔═╡ 00000000-0000-0000-0000-000000000002
PLUTO_MANIFEST_TOML_CONTENTS = """
# This file is machine-generated - editing it directly is not advised
[[AbstractPlutoDingetjes]]
deps = ["Pkg"]
git-tree-sha1 = "6e1d2a35f2f90a4bc7c2ed98079b2ba09c35b83a"
uuid = "6e696c72-6542-2067-7265-42206c756150"
version = "1.3.2"
[[ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
[[Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
[[Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[ColorTypes]]
deps = ["FixedPointNumbers", "Random"]
git-tree-sha1 = "b10d0b65641d57b8b4d5e234446582de5047050d"
uuid = "3da002f7-5984-5a60-b8a6-cbb66c0b333f"
version = "0.11.5"
[[Colors]]
deps = ["ColorTypes", "FixedPointNumbers", "Reexport"]
git-tree-sha1 = "362a287c3aa50601b0bc359053d5c2468f0e7ce0"
uuid = "5ae59095-9a9b-59fe-a467-6f913c188581"
version = "0.12.11"
[[Conda]]
deps = ["Downloads", "JSON", "VersionParsing"]
git-tree-sha1 = "b19db3927f0db4151cb86d073689f2428e524576"
uuid = "8f4d0f93-b110-5947-807f-2305c1781a2d"
version = "1.10.2"
[[Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
[[DelimitedFiles]]
deps = ["Mmap"]
uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab"
[[Downloads]]
deps = ["ArgTools", "LibCURL", "NetworkOptions"]
uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
[[FixedPointNumbers]]
deps = ["Statistics"]
git-tree-sha1 = "335bfdceacc84c5cdf16aadc768aa5ddfc5383cc"
uuid = "53c48c17-4a7d-5ca2-90c5-79b7896eea93"
version = "0.8.4"
[[Hyperscript]]
deps = ["Test"]
git-tree-sha1 = "179267cfa5e712760cd43dcae385d7ea90cc25a4"
uuid = "47d2ed2b-36de-50cf-bf87-49c2cf4b8b91"
version = "0.0.5"
[[HypertextLiteral]]
deps = ["Tricks"]
git-tree-sha1 = "7134810b1afce04bbc1045ca1985fbe81ce17653"
uuid = "ac1192a8-f4b3-4bfe-ba22-af5b92cd3ab2"
version = "0.9.5"
[[IOCapture]]
deps = ["Logging", "Random"]
git-tree-sha1 = "8b72179abc660bfab5e28472e019392b97d0985c"
uuid = "b5f81e59-6552-4d32-b1f0-c071b021bf89"
version = "0.2.4"
[[InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
[[JSON]]
deps = ["Dates", "Mmap", "Parsers", "Unicode"]
git-tree-sha1 = "31e996f0a15c7b280ba9f76636b3ff9e2ae58c9a"
uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
version = "0.21.4"
[[LaTeXStrings]]
git-tree-sha1 = "50901ebc375ed41dbf8058da26f9de442febbbec"
uuid = "b964fa9f-0449-5b57-a5c2-d3ea65f4040f"
version = "1.3.1"
[[LibCURL]]
deps = ["LibCURL_jll", "MozillaCACerts_jll"]
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
[[LibCURL_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
[[LibGit2]]
deps = ["Base64", "NetworkOptions", "Printf", "SHA"]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
[[LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
[[Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
[[LinearAlgebra]]
deps = ["Libdl"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
[[Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
[[MIMEs]]
git-tree-sha1 = "65f28ad4b594aebe22157d6fac869786a255b7eb"
uuid = "6c6e2e6c-3030-632d-7369-2d6c69616d65"
version = "0.1.4"
[[MacroTools]]
deps = ["Markdown", "Random"]
git-tree-sha1 = "2fa9ee3e63fd3a4f7a9a4f4744a52f4856de82df"
uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
version = "0.5.13"
[[Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
[[MbedTLS_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
[[Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
[[MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
[[NetworkOptions]]
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
[[Parsers]]
deps = ["Dates", "PrecompileTools", "UUIDs"]
git-tree-sha1 = "8489905bcdbcfac64d1daa51ca07c0d8f0283821"
uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
version = "2.8.1"
[[Pkg]]
deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
[[PlutoUI]]
deps = ["AbstractPlutoDingetjes", "Base64", "ColorTypes", "Dates", "FixedPointNumbers", "Hyperscript", "HypertextLiteral", "IOCapture", "InteractiveUtils", "JSON", "Logging", "MIMEs", "Markdown", "Random", "Reexport", "URIs", "UUIDs"]
git-tree-sha1 = "ab55ee1510ad2af0ff674dbcced5e94921f867a9"
uuid = "7f904dfe-b85e-4ff6-b463-dae2292396a8"
version = "0.7.59"
[[PrecompileTools]]
deps = ["Preferences"]
git-tree-sha1 = "5aa36f7049a63a1528fe8f7c3f2113413ffd4e1f"
uuid = "aea7be01-6a6a-4083-8856-8a6e6704d82a"
version = "1.2.1"
[[Preferences]]
deps = ["TOML"]
git-tree-sha1 = "9306f6085165d270f7e3db02af26a400d580f5c6"
uuid = "21216c6a-2e73-6563-6e65-726566657250"
version = "1.4.3"
[[Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
[[PyCall]]
deps = ["Conda", "Dates", "Libdl", "LinearAlgebra", "MacroTools", "Serialization", "VersionParsing"]
git-tree-sha1 = "9816a3826b0ebf49ab4926e2b18842ad8b5c8f04"
uuid = "438e738f-606a-5dbb-bf0a-cddfbfd45ab0"
version = "1.96.4"
[[PyPlot]]
deps = ["Colors", "LaTeXStrings", "PyCall", "Sockets", "Test", "VersionParsing"]
git-tree-sha1 = "0371ca706e3f295481cbf94c8c36692b072285c2"
uuid = "d330b81b-6aea-500a-939a-2ce795aea3ee"
version = "2.11.5"
[[REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
[[Random]]
deps = ["Serialization"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
[[Reexport]]
git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b"
uuid = "189a3867-3050-52da-a836-e630ba90ab69"
version = "1.2.2"
[[SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
[[Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
[[Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
[[SparseArrays]]
deps = ["LinearAlgebra", "Random"]
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
[[Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
[[TOML]]
deps = ["Dates"]
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
[[TTVFaster]]
deps = ["DelimitedFiles"]
git-tree-sha1 = "7fea3b01e0d667fd77281ad215efeff82948bbf3"
uuid = "d84f081e-b698-44a3-a477-911041168508"
version = "0.2.0"
[[Tar]]
deps = ["ArgTools", "SHA"]
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
[[Test]]
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[[Tricks]]
git-tree-sha1 = "eae1bb484cd63b36999ee58be2de6c178105112f"
uuid = "410a4b4d-49e4-4fbc-ab6d-cb71b17b3775"
version = "0.1.8"
[[URIs]]
git-tree-sha1 = "67db6cc7b3821e19ebe75791a9dd19c9b1188f2b"
uuid = "5c2747f8-b7ea-4ff2-ba2e-563bfd36b1d4"
version = "1.5.1"
[[UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
[[Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
[[VersionParsing]]
git-tree-sha1 = "58d6e80b4ee071f5efd07fda82cb9fbe17200868"
uuid = "81def892-9a0e-5fdd-b105-ffc91e053289"
version = "1.3.0"
[[Zlib_jll]]
deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
[[nghttp2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
[[p7zip_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
"""
# ╔═╡ Cell order:
# ╠═acc61da3-1bf6-4413-a3a9-3bacfd318def
# ╠═3c6c85c7-299a-4d5f-8769-a236e31bfd14
# ╠═4db7da85-ae23-4a34-bb69-5aab907d7145
# ╠═b16d8dd8-00c3-4bc0-8a93-bedd356dc577
# ╠═4702121d-b071-4dd3-a43b-00fdf9c33f70
# ╠═6ec3cebb-f763-431e-be22-2f7ce3072fc0
# ╠═7cb10c9e-24eb-48b8-9b35-697fec3ec79c
# ╠═d08ce186-d8e5-4515-9b46-9bb62260936c
# ╠═525e9b6c-8255-42d7-a704-3924f0d27213
# ╠═dc663401-3356-4cd0-8e01-7619a0e7df42
# ╠═e00727ea-f5fb-49bc-83c2-21fb50dbe7e1
# ╠═0ff62a0d-53e6-48b9-8246-3f7b89499417
# ╠═ebea622c-b4a2-4fdd-bb3e-2925695da863
# ╟─00000000-0000-0000-0000-000000000001
# ╟─00000000-0000-0000-0000-000000000002
| TTVFaster | https://github.com/bmlindor/TTVFaster.jl.git |
|
[
"MIT"
] | 0.2.1 | c3964c133326d031d4da5f539355a77aae9fe6ce | code | 2019 | # This code calls the first-order eccentric TTV code
# compute_ttv.jl. Please cite Agol & Deck (2015) if
# you make use of this in your research.
# include("../src/TTVFaster.jl")
# include("laplace_coefficients_initialize.jl")
function test_ttv(jmax::Integer,n1::Integer,n2::Integer,data::Array{Float64}; WriteOutput::Bool = true, num_evals::Integer = 1, profile::Bool = false)
@assert(jmax>=1) # Should there be a larger minimum?
@assert(n1>2)
@assert(n2>2)
@assert(length(data)==10)
# Performs a test of the compute_ttv.jl routine
# Set up planets planar-planet types for the inner and outer planets:
p1=TTVFaster.Planet_plane_hk(data[1],data[2],data[3],data[4],data[ 5])
p2=TTVFaster.Planet_plane_hk(data[6],data[7],data[8],data[9],data[10])
time1 = collect(p1.trans0 .+ range(0,stop=n1-1,length=n1) .* p1.period)
time2 = collect(p2.trans0 .+ range(0,stop=n2-1,length=n2) .* p2.period)
alpha0=(p1.period/p2.period)^(2//3)
# Initialize the computation of the Laplace coefficients:
# b0=TTVFaster.LaplaceCoefficients.initialize(jmax+1,alpha0)
# Define arrays to hold the TTVs:
#ttv1=Array(Float64,n1)
#ttv2=Array(Float64,n2)
ttv_el_type = eltype(data) == Float64 ? Float64 : Number
ttv1=zeros(ttv_el_type,n1)
ttv2=zeros(ttv_el_type,n2)
# Define arrays to hold the TTV coefficients and Laplace coefficients:
f1=zeros(Float64,jmax+2,5)
f2=zeros(Float64,jmax+2,5)
b=zeros(Float64,jmax+2,3)
hashsum = 0
for i in 1:num_evals
# Call the compute_ttv code which implements equation (33)
TTVFaster.compute_ttv!(jmax,p1,p2,time1,time2,ttv1,ttv2)#,f1,f2,b,alpha0,b0)
if profile
hashsum += hash(ttv1)+hash(ttv2)
end
end
if profile
println("# Ignore this: ",hashsum) # This just makes sure optimizer doesn't optimize away important calculations.
end
if WriteOutput
# Write the mean ephemeris and TTV results to two files:
writedlm("inner_ttv.txt",[time1 ttv1])
writedlm("outer_ttv.txt",[time2 ttv2])
end
return ttv1,ttv2
end
| TTVFaster | https://github.com/bmlindor/TTVFaster.jl.git |
|
[
"MIT"
] | 0.2.1 | c3964c133326d031d4da5f539355a77aae9fe6ce | code | 446 | """
TTVFaster
Computes first-order (in eccentricity) transit timing variations (TTVs) with respect to the following initial properties: the planet-star mass ratio [μ], the initial transit time (of the averaged orbit) [t0], the mean orbital period [Per], the eccentricity [e], and the longitude of periastron [ϖ].
"""
module TTVFaster
include("ttv_wrapper.jl")
export Planet_plane_hk, ttv_wrapper, compute_ttv!
# export Planet_plane
end
| TTVFaster | https://github.com/bmlindor/TTVFaster.jl.git |
|
[
"MIT"
] | 0.2.1 | c3964c133326d031d4da5f539355a77aae9fe6ce | code | 5240 | include("ttv_succinct.jl")
"""
Computes transit timing variations to linear order in eccentricity for non-resonant, plane-parallel planets.
Please cite Agol & Deck (2015) if you make use of this in published research.
"""
struct Planet_plane # Deprecated, only exported to make the error message work
mass_ratio :: Float64
period :: Float64
trans0 :: Float64
ecc :: Float64
omega :: Float64
end
struct Planet_plane_hk{T<:Real} # Parameters of a planet in a plane-parallel system
# Mass ratio of the planet to the star:
mass_ratio :: T
# Initial time of transit:
period :: T
trans0 :: T
# e times cos or sin of longitude of periastron measured from line of sight, in radians:
ecosw :: T
esinw :: T
end
"""
compute_ttv!(jmax,p1,p2,time1,time2,ttv1,ttv2)
Arguments:
jmax: Maximum j over which to sum the TTV calculation for both planets
p1: Planet type for inner planet
p2: Planet type for outer planet
time1: Transit times for inner planet
time2: Transit times for outer planet
ttv1: Container for TTVs of the inner planet
ttv2: Container for TTVs of the outer planet
Outputs:
ttv1
ttv2
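Example (a sketch with placeholder planet parameters, following test_ttv.jl; not a real system):
```julia
p1 = Planet_plane_hk(3e-5, 5.7, 100.0, 0.01, 0.005)
p2 = Planet_plane_hk(5e-5, 12.3, 103.0, -0.02, 0.01)
time1 = collect(p1.trans0 .+ (0:39) .* p1.period)
time2 = collect(p2.trans0 .+ (0:19) .* p2.period)
ttv1 = zeros(40); ttv2 = zeros(20)
compute_ttv!(5, p1, p2, time1, time2, ttv1, ttv2)  # fills ttv1/ttv2 in place, in the same time units as the inputs
```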
"""
function compute_ttv!(jmax::Integer,p1::Planet_plane_hk{T},p2::Planet_plane_hk{T},time1::Vector{T},time2::Vector{T},ttv1::Vector{T},ttv2::Vector{T}) where T<:Real
# Compute the semi-major axis ratio of the planets:
# println(p1.period,p2.period)
alpha = (p1.period/p2.period)^(2//3)
#println(alpha, p1.period, p2.period)
@assert(alpha < 1)
@assert(alpha > 0)
# Number of times:
ntime1 = length(time1)
ntime2 = length(time2)
f1=zeros(T,jmax+2,5)
f2=zeros(T,jmax+2,5)
# Compute the coefficients:
ttv_succinct!(jmax+1,alpha,f1,f2) # I need to compute coefficients one higher than jmax
# Compute TTVs for inner planet (equation 33):
# Compute sines and cosines of the longitudes of periastron (ϖ):
e1 = sqrt(p1.esinw*p1.esinw + p1.ecosw*p1.ecosw)
e2 = sqrt(p2.esinw*p2.esinw + p2.ecosw*p2.ecosw)
sin1om=p1.esinw/e1
sin2om=p2.esinw/e2
cos1om=p1.ecosw/e1
cos2om=p2.ecosw/e2
# Compute mean motions:
n1=2pi/p1.period
n2=2pi/p2.period
# Compute initial longitudes:
lam10=-n1*p1.trans0 + 2*p1.esinw # 2*p1.eccen*sin1om
lam20=-n2*p2.trans0 + 2*p2.esinw # 2*p2.eccen*sin2om
@inbounds for i=1:ntime1
# Compute the longitudes of the planets at times of transit of planet 1 (equation 49):
lam11 = n1*time1[i]+lam10
lam21 = n2*time1[i]+lam20
psi1 = lam11-lam21 # Compute difference in longitudes at times of transit of planet 1
sinpsi1=sin(psi1)
cospsi1=cos(psi1)
sinlam11 = sin(lam11)
coslam11 = cos(lam11)
sinlam1om1=sinlam11*cos1om - coslam11*sin1om
coslam1om1=coslam11*cos1om + sinlam11*sin1om
sinlam1om2=sinlam11*cos2om - coslam11*sin2om
coslam1om2=coslam11*cos2om + sinlam11*sin2om
ttv1[i]=zero(T) #0.0
sinjm1psi1=zero(T) #0.0
cosjm1psi1=one(T) #1.0
# Sum over j:
for j=1:jmax
sinjpsi1=sinjm1psi1*cospsi1 + cosjm1psi1*sinpsi1
cosjpsi1=cosjm1psi1*cospsi1 - sinjm1psi1*sinpsi1
ttv1[i] += f1[j+1,1]*sinjpsi1
ttv1[i] += f1[j+1,2]*e1*(sinjpsi1*coslam1om1 - cosjpsi1*sinlam1om1)
ttv1[i] += f1[j+1,3]*e1*(sinjpsi1*coslam1om1 + cosjpsi1*sinlam1om1)
ttv1[i] += f1[j ,4]*e2*(sinjpsi1*coslam1om2 - cosjpsi1*sinlam1om2)
ttv1[i] += f1[j+2,5]*e2*(sinjpsi1*coslam1om2 + cosjpsi1*sinlam1om2)
sinjm1psi1=sinjpsi1
cosjm1psi1=cosjpsi1
end
# Multiply by period and mass ratio, and divide by 2*Pi:
ttv1[i] = ttv1[i]*p1.period*p2.mass_ratio/(2pi)
end
# Compute TTVs for outer planet (equation 33):
@inbounds for i=1:ntime2
# Compute the longitudes of the planets at times of transit of planet 2:
lam12 = n1*time2[i]+lam10
lam22 = n2*time2[i]+lam20
sinlam22 = sin(lam22)
coslam22 = cos(lam22)
psi2 = lam12-lam22 # Compute difference in longitudes at times of transit of planet 2
sinpsi2=sin(psi2)
cospsi2=cos(psi2)
sinlam2om1=sinlam22*cos1om - coslam22*sin1om
coslam2om1=coslam22*cos1om + sinlam22*sin1om
sinlam2om2=sinlam22*cos2om - coslam22*sin2om
coslam2om2=coslam22*cos2om + sinlam22*sin2om
ttv2[i]=zero(p2.period) #0.0
sinjm1psi2=zero(p2.period) #0.0
cosjm1psi2=one(p2.period) #1.0
# Sum over j:
for j=1:jmax
sinjpsi2=sinjm1psi2*cospsi2 + cosjm1psi2*sinpsi2
cosjpsi2=cosjm1psi2*cospsi2 - sinjm1psi2*sinpsi2
ttv2[i] += f2[j+1,1]*sinjpsi2
ttv2[i] += f2[j+1,2]*e2*(sinjpsi2*coslam2om2 - cosjpsi2*sinlam2om2)
ttv2[i] += f2[j+1,3]*e2*(sinjpsi2*coslam2om2 + cosjpsi2*sinlam2om2)
ttv2[i] += f2[j+2,4]*e1*(sinjpsi2*coslam2om1 - cosjpsi2*sinlam2om1)
ttv2[i] += f2[j ,5]*e1*(sinjpsi2*coslam2om1 + cosjpsi2*sinlam2om1)
sinjm1psi2=sinjpsi2
cosjm1psi2=cosjpsi2
end
# Multiply by period and mass ratio, and divide by 2*Pi:
ttv2[i] = ttv2[i]*p2.period*p1.mass_ratio/(2pi)
end
# Finished!
return
end # compute_ttv!
function compute_ttv!(jmax::Integer,p1::Planet_plane,p2::Planet_plane,time1::Vector,time2::Vector,ttv1::Vector,ttv2::Vector)
# Error message to explain to anyone who tries to use the old version
error("The Planet_plane data structure has been deprecated in favor of Planet_plane_hk")
end | TTVFaster | https://github.com/bmlindor/TTVFaster.jl.git |
|
[
"MIT"
] | 0.2.1 | c3964c133326d031d4da5f539355a77aae9fe6ce | code | 527 | module LaplaceCoefficients
export initialize
export laplace_wisdom
include("laplace_wisdom.jl")
"""
Compute the Laplace coefficients via Wisdom's series approach.
"""
function initialize(jmax::Integer,alpha::Number)
global nmax=7
b0=Array{eltype(alpha)}(undef,nmax,jmax+1) # Array to hold the coefficients
# Compute the Laplace coefficients using Wisdom's series approach:
for j=0:jmax
for i=0:nmax-1
b0[i+1,j+1]=laplace_wisdom(1//2,j,i,alpha)/alpha^i
end
end
return b0
end
end # module | TTVFaster | https://github.com/bmlindor/TTVFaster.jl.git |
|
[
"MIT"
] | 0.2.1 | c3964c133326d031d4da5f539355a77aae9fe6ce | code | 1502 | """
Compute the Laplace coefficient ``b_s^i(a)`` and the Leverrier derivative

    a^j * d^j/da^j * b_s^i(a)

by series summation. Code due to Jack Wisdom.
"""
function laplace_wisdom(s::Rational,i::Integer,j::Integer,a::T) where T<:Real
# define LAPLACE_EPS 1.0e-12
LAPLACE_EPS = convert(T,1.0e-12)
#if (i lt 0) then i = -i
i=abs(i)
if j <= i # compute first term in sum
factor4 = one(T)
for k=0:j-1
factor4 *= i - k
end
lap_coef_sum = factor4
q0 = 0
else
q0 = fld(j + 1 - i,2) # largest integer less than or equal to x/y
lap_coef_sum = zero(T)
factor4 = one(T)
end
# Compute factors for terms in lap_coef_sum:
factor1 = s
factor2 = s + i
factor3 = i + 1
for q=1:q0-1 # no contribution for q = 0
factor1 *= s + q
factor2 *= s + i + q
factor3 *= i + q + 1
end
if q0 > 1
q=q0
else
q=1
end
#println(j+1-i,q0)
term = a*a * factor1 * factor2 / (factor3 * q)
# Sum series:
while (term*factor4) > LAPLACE_EPS
factor4 = one(T)
for k=0:j-1
factor4 *= (2*q + i - k)
end
lap_coef_sum += term * factor4
factor1 += 1
factor2 += 1
factor3 += 1
q = q+1
term *= a*a * factor1 * factor2 / (factor3 * q)
end
# Fix coefficient:
for k=0:i-1
lap_coef_sum *= (s+k)/(k+1)
end
apower = (q0 <= 0) ? i : 2*q0 + i - 2
lap_coef_sum *= 2 * a^apower
# Return the Laplace Coefficient:
return lap_coef_sum
end | TTVFaster | https://github.com/bmlindor/TTVFaster.jl.git |
|
[
"MIT"
] | 0.2.1 | c3964c133326d031d4da5f539355a77aae9fe6ce | code | 2930 | include("compute_ttv.jl")
"""
ttv_nplanet(nplanet,jmax,ntrans,params)
Computes TTVs with TTVFaster for N planets with pairwise TTV calculation.
Arguments:
nplanet: Number of planets
jmax: Maximum j over which to sum the TTV calculation
ntrans: Number of transits for each planet
params: Parameters of each planet
Returns:
ttvs: Computed transit timing variations for the transiting planets.
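Example:
An illustrative two-planet call using the Kepler-62e/f-like parameters from
`examples/kepler62ef_planets.txt` (the transit counts below are arbitrary):
```julia
params = [3.02306e-5, 122.386, -16.5926, -0.00127324, 0.0026446,
          1.67874e-5, 267.307, 155.466, -0.0025544, 0.00117917]
ttvs = ttv_nplanet(2, 5, [40, 20], params)  # 2 planets, jmax = 5, 40 and 20 transits
```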
"""
function ttv_nplanet(nplanet::Int64,jmax::Int64,ntrans::Vector{Int64},params::Vector{T}) where T<:Real
# Need at least two planets!
@assert(nplanet>=2)
# The ntrans vectors should have length nplanet:
@assert(length(ntrans)==nplanet)
# Define type of ttv array:
ttv_el_type = eltype(params) == Float64 ? Float64 : Number
# Need to create an array to store TTVs with maximum length equal to maximum number of transit times of any planet:
ntransmax = maximum(ntrans)
ttv = zeros(T,nplanet,ntransmax)
# Each planet requires 5 elements in params (mass_ratio,period,trans0,ecosw,esinw):
@assert(length(params)==5*nplanet)
@assert(jmax>=1)
for iplanet=1:nplanet
# Each planet should have at least 2 transits:
@assert(ntrans[iplanet]>=2)
end
for iplanet=1:nplanet-1
# The periods of the planets should be ordered from least to greatest:
if (params[(iplanet-1)*5+2] >= params[iplanet*5+2])
return ttv
end
end
# Set up planets planar-planet types for all of the planets:
#planet = Array{Planet_plane_hk}(nplanet)
#planet = Array{Any}(nplanet)
# Loop over pairs of planets to compute pairwise TTVs
# Loop over inner planets:
#println("Looping over planets in ttv_nplanet:")
for iplanet=1:nplanet-1
# Create a Planet_plane_hk type for the inner planet:
p1=Planet_plane_hk(params[(iplanet-1)*5+1],params[(iplanet-1)*5+2],params[(iplanet-1)*5+3],params[(iplanet-1)*5+4],params[(iplanet-1)*5+5])
# Create an array of times for the inner planet:
n1 = ntrans[iplanet]
time1 = collect(p1.trans0 .+ range(0,stop=n1-1,length=n1) .* p1.period)
# Loop over outer planets:
for jplanet=iplanet+1:nplanet
# Create a Planet_plane_hk type for the outer planet:
p2=Planet_plane_hk(params[(jplanet-1)*5+1],params[(jplanet-1)*5+2],params[(jplanet-1)*5+3],params[(jplanet-1)*5+4],params[(jplanet-1)*5+5])
# Create an array of times for the outer planet:
n2 = ntrans[jplanet]
time2 = collect(p2.trans0 .+ range(0,stop=n2-1,length=n2) .* p2.period)
# Define arrays to hold the TTVs:
ttv1=zeros(T,n1)
ttv2=zeros(T,n2)
# Call the compute_ttv code which implements equation (33) from Agol & Deck (2016):
# println("Calling compute_ttv")
compute_ttv!(jmax,p1,p2,time1,time2,ttv1,ttv2)
# println("Finished compute_ttv")
for i=1:n1
ttv[iplanet,i] += ttv1[i]
end
for i=1:n2
ttv[jplanet,i] += ttv2[i]
end
end
end
return ttv
end
| TTVFaster | https://github.com/bmlindor/TTVFaster.jl.git |
|
[
"MIT"
] | 0.2.1 | c3964c133326d031d4da5f539355a77aae9fe6ce | code | 8481 | # Computes TTV coefficients for first-order eccentricity
# solution from Agol & Deck (2015). Please cite this paper
# if you make use of this in your research.
include("laplace_wisdom.jl")
u(gamma::T,c1::T,c2::T) where {T<:Number}= ((3+gamma*gamma)*c1+2*gamma*c2)/(gamma*gamma*(1-gamma*gamma))
# m=+/-1
v(z::T,d1::T,d2::T,m::Integer) where {T<:Number}= ((m*(1-z*z)+6*z)*d1+(2+z*z)*d2)/(z*(1-z*z)*(z+m)*(z+2*m))
function ttv_succinct!(jmax::Integer,alpha::T,f1::Array{T,2},f2::Array{T,2}) where T<:Real
# See simple_solution.pdf 7/16/2015
b=zeros(T,jmax+2,3)
#println("Computing Laplace Coeffcients: ",alpha)
for i=0:2
for j=0:jmax
b[j+1,i+1]=laplace_wisdom(1//2,j,i,alpha)/alpha^i
end
end
#println("Done with Laplace Coeffcients: ",alpha)
sqrtalpha = sqrt(alpha)
# Loop over j:
@inbounds for j=0:jmax
# \delta_{j1} (this is indirect coefficient which is only needed for j=1)
dj1 = j==1 ? 1.0 : 0.0
# Compute dimensionless frequencies (equation 30):
beta = j*(1-alpha*sqrtalpha)
kappa = beta / (alpha*sqrtalpha)
# Compute disturbing function coefficients (equation 31):
A_j00 = b[j+1,1]
A_j10 = alpha* b[j+1,2]
A_j01 = -(A_j10 + A_j00)
A_j20 = alpha*alpha * b[j+1,3]
A_j11 = -(2*A_j10 + A_j20)
A_j02 = 2*A_j00 + 4*A_j10 + A_j20
jd=convert(eltype(alpha),j)
# Inner planet coefficients, in order k=0,-1,1,-2,2 (see Table 1):
if j >=2
f1[j+1,1]=alpha*u(beta ,jd*( A_j00-alpha*dj1),A_j10-alpha*dj1)
f1[j+1,2]=alpha*u(beta-1.0 ,jd*(-jd*A_j00-0.5*A_j10+1.5*alpha*dj1),-jd*A_j10-0.5*A_j20+alpha*dj1)
f1[j+1,3]=alpha*u(beta+1.0 ,jd*( jd*A_j00-0.5*A_j10-0.5*alpha*dj1), jd*A_j10-0.5*A_j20-alpha*dj1)
f1[j+1,4]=alpha*u(beta-alpha*sqrtalpha,jd*( jd*A_j00-0.5*A_j01-2.0*alpha*dj1), jd*A_j10-0.5*A_j11-2.0*alpha*dj1)
f1[j+1,5]=alpha*u(beta+alpha*sqrtalpha,jd*(-jd*A_j00-0.5*A_j01),-jd*A_j10-0.5*A_j11)
else
if j==0
f1[j+1,4]=alpha*u(beta-alpha*sqrtalpha,jd*( jd*A_j00-0.5*A_j01-2.0*alpha*dj1), jd*A_j10-0.5*A_j11-2.0*alpha*dj1)
else
f1[j+1,1]=alpha*u(beta ,jd*( A_j00-alpha*dj1),A_j10-alpha*dj1)
f1[j+1,2]=alpha*u(beta-1.0 ,jd*(-jd*A_j00-0.5*A_j10+1.5*alpha*dj1),-jd*A_j10-0.5*A_j20+alpha*dj1)
f1[j+1,3]=alpha*u(beta+1.0 ,jd*( jd*A_j00-0.5*A_j10-0.5*alpha*dj1), jd*A_j10-0.5*A_j20-alpha*dj1)
f1[j+1,4]=alpha*u(beta-alpha*sqrtalpha,jd*( jd*A_j00-0.5*A_j01-2.0*alpha*dj1), jd*A_j10-0.5*A_j11-2.0*alpha*dj1)
end
end
# Add in the k=\pm 1 coefficients (note that d1 & d2 are the same as c1 & c2 for k=0):
if j >= 1
f1[j+1,2]=f1[j+1,2]+alpha*v(beta,jd*(A_j00-alpha*dj1),A_j10-alpha*dj1,-1)
f1[j+1,3]=f1[j+1,3]+alpha*v(beta,jd*(A_j00-alpha*dj1),A_j10-alpha*dj1, 1)
end
# Now for the outer planet:
# Outer planet coefficients, in order k=0,-2,2,-1,1 (see Table 1):
one_over_alpha_squared = 1/(alpha*alpha)
if j >= 2
f2[j+1,1]=u(kappa,-jd*(A_j00-dj1*one_over_alpha_squared),A_j01-dj1*one_over_alpha_squared)
f2[j+1,2]=u(kappa-1,-jd*(jd*A_j00-0.5*A_j01-0.5*dj1*one_over_alpha_squared),jd*A_j01-0.5*A_j02-dj1*one_over_alpha_squared)
f2[j+1,3]=u(kappa+1,-jd*(-jd*A_j00-0.5*A_j01+1.5*dj1*one_over_alpha_squared),-jd*A_j01-0.5*A_j02+dj1*one_over_alpha_squared)
f2[j+1,4]=u(kappa-1/(alpha*sqrtalpha),-jd*(-jd*A_j00-0.5*A_j10),-jd*A_j01-0.5*A_j11)
else
if j == 1
f2[j+1,1]=u(kappa,-jd*(A_j00-dj1*one_over_alpha_squared),A_j01-dj1*one_over_alpha_squared)
f2[j+1,2]=u(kappa-1,-jd*(jd*A_j00-0.5*A_j01-0.5*dj1*one_over_alpha_squared),jd*A_j01-0.5*A_j02-dj1*one_over_alpha_squared)
f2[j+1,3]=u(kappa+1,-jd*(-jd*A_j00-0.5*A_j01+1.5*dj1*one_over_alpha_squared),-jd*A_j01-0.5*A_j02+dj1*one_over_alpha_squared)
end
end
f2[j+1,5]=u(kappa+1/(alpha*sqrtalpha),-jd*(jd*A_j00-0.5*A_j10-2.0*dj1*one_over_alpha_squared),jd*A_j01-0.5*A_j11-2.0*dj1*one_over_alpha_squared)
# Add in the k=\pm 2 coefficients (note that d1 & d2 are the same as c1 & c2 for k=0):
if j >= 1
f2[j+1,2]=f2[j+1,2]+v(kappa,-jd*(A_j00-dj1*one_over_alpha_squared),A_j01-dj1*one_over_alpha_squared,-1)
f2[j+1,3]=f2[j+1,3]+v(kappa,-jd*(A_j00-dj1*one_over_alpha_squared),A_j01-dj1*one_over_alpha_squared, 1)
end
# That's it!
end #end of loop
return
end
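# Example (illustrative, commented out; the array shapes are chosen to match the indexing above):
#   jmax = 5; alpha = 0.3                       # alpha = a_1/a_2, e.g. (P_1/P_2)^(2/3)
#   f1 = zeros(jmax + 2, 5); f2 = zeros(jmax + 2, 5)
#   ttv_succinct!(jmax, alpha, f1, f2)          # fills the coefficient tables in place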
# Computes Taylor series expansion for when period is fixed. code exists somewhere
function ttv_succinct!(jmax::Integer,alpha::Number,f1::Array{Float64,2},f2::Array{Float64,2},b::Array{Float64,2},alpha0::Number,b0::Array{Float64,2})
# See simple_solution.pdf 7/16/2015
# Fourth-order Taylor expansion approximation of Laplace coefficients:
dalpha = alpha-alpha0
for i=0:2
for j=0:jmax
b[j+1,i+1]=b0[i+1,j+1]+dalpha*(b0[i+2,j+1]+0.5*dalpha*(b0[i+3,j+1]+dalpha/3.0*(b0[i+4,j+1]+dalpha*0.25*b0[i+5,j+1])))
end
end
sqrtalpha = sqrt(alpha)
# Loop over j:
@inbounds for j=0:jmax
# \delta_{j1} (this is indirect coefficient which is only needed for j=1)
dj1 = j==1 ? 1.0 : 0.0
# Compute dimensionless frequencies (equation 30):
beta = j*(1-alpha*sqrtalpha)
kappa = beta / (alpha*sqrtalpha)
# Compute disturbing function coefficients (equation 31):
A_j00 = b[j+1,1]
A_j10 = alpha* b[j+1,2]
A_j01 = -(A_j10 + A_j00)
A_j20 = alpha*alpha * b[j+1,3]
A_j11 = -(2*A_j10 + A_j20)
A_j02 = 2*A_j00 + 4*A_j10 + A_j20
jd=convert(eltype(alpha),j)
# Inner planet coefficients, in order k=0,-1,1,-2,2 (see Table 1):
if j >=2
f1[j+1,1]=alpha*u(beta ,jd*( A_j00-alpha*dj1),A_j10-alpha*dj1)
f1[j+1,2]=alpha*u(beta-1.0 ,jd*(-jd*A_j00-0.5*A_j10+1.5*alpha*dj1),-jd*A_j10-0.5*A_j20+alpha*dj1)
f1[j+1,3]=alpha*u(beta+1.0 ,jd*( jd*A_j00-0.5*A_j10-0.5*alpha*dj1), jd*A_j10-0.5*A_j20-alpha*dj1)
f1[j+1,4]=alpha*u(beta-alpha*sqrtalpha,jd*( jd*A_j00-0.5*A_j01-2.0*alpha*dj1), jd*A_j10-0.5*A_j11-2.0*alpha*dj1)
f1[j+1,5]=alpha*u(beta+alpha*sqrtalpha,jd*(-jd*A_j00-0.5*A_j01),-jd*A_j10-0.5*A_j11)
else
if j==0
f1[j+1,4]=alpha*u(beta-alpha*sqrtalpha,jd*( jd*A_j00-0.5*A_j01-2.0*alpha*dj1), jd*A_j10-0.5*A_j11-2.0*alpha*dj1)
else
f1[j+1,1]=alpha*u(beta ,jd*( A_j00-alpha*dj1),A_j10-alpha*dj1)
f1[j+1,2]=alpha*u(beta-1.0 ,jd*(-jd*A_j00-0.5*A_j10+1.5*alpha*dj1),-jd*A_j10-0.5*A_j20+alpha*dj1)
f1[j+1,3]=alpha*u(beta+1.0 ,jd*( jd*A_j00-0.5*A_j10-0.5*alpha*dj1), jd*A_j10-0.5*A_j20-alpha*dj1)
f1[j+1,4]=alpha*u(beta-alpha*sqrtalpha,jd*( jd*A_j00-0.5*A_j01-2.0*alpha*dj1), jd*A_j10-0.5*A_j11-2.0*alpha*dj1)
end
end
# Add in the k=\pm 1 coefficients (note that d1 & d2 are the same as c1 & c2 for k=0):
if j >= 1
f1[j+1,2]=f1[j+1,2]+alpha*v(beta,jd*(A_j00-alpha*dj1),A_j10-alpha*dj1,-1)
f1[j+1,3]=f1[j+1,3]+alpha*v(beta,jd*(A_j00-alpha*dj1),A_j10-alpha*dj1, 1)
end
# Now for the outer planet:
# Outer planet coefficients, in order k=0,-2,2,-1,1 (see Table 1):
one_over_alpha_squared = 1/(alpha*alpha)
if j >= 2
f2[j+1,1]=u(kappa,-jd*(A_j00-dj1*one_over_alpha_squared),A_j01-dj1*one_over_alpha_squared)
f2[j+1,2]=u(kappa-1,-jd*(jd*A_j00-0.5*A_j01-0.5*dj1*one_over_alpha_squared),jd*A_j01-0.5*A_j02-dj1*one_over_alpha_squared)
f2[j+1,3]=u(kappa+1,-jd*(-jd*A_j00-0.5*A_j01+1.5*dj1*one_over_alpha_squared),-jd*A_j01-0.5*A_j02+dj1*one_over_alpha_squared)
f2[j+1,4]=u(kappa-1/(alpha*sqrtalpha),-jd*(-jd*A_j00-0.5*A_j10),-jd*A_j01-0.5*A_j11)
else
if j == 1
f2[j+1,1]=u(kappa,-jd*(A_j00-dj1*one_over_alpha_squared),A_j01-dj1*one_over_alpha_squared)
f2[j+1,2]=u(kappa-1,-jd*(jd*A_j00-0.5*A_j01-0.5*dj1*one_over_alpha_squared),jd*A_j01-0.5*A_j02-dj1*one_over_alpha_squared)
f2[j+1,3]=u(kappa+1,-jd*(-jd*A_j00-0.5*A_j01+1.5*dj1*one_over_alpha_squared),-jd*A_j01-0.5*A_j02+dj1*one_over_alpha_squared)
end
end
f2[j+1,5]=u(kappa+1/(alpha*sqrtalpha),-jd*(jd*A_j00-0.5*A_j10-2.0*dj1*one_over_alpha_squared),jd*A_j01-0.5*A_j11-2.0*dj1*one_over_alpha_squared)
# Add in the k=\pm 2 coefficients (note that d1 & d2 are the same as c1 & c2 for k=0):
if j >= 1
f2[j+1,2]=f2[j+1,2]+v(kappa,-jd*(A_j00-dj1*one_over_alpha_squared),A_j01-dj1*one_over_alpha_squared,-1)
f2[j+1,3]=f2[j+1,3]+v(kappa,-jd*(A_j00-dj1*one_over_alpha_squared),A_j01-dj1*one_over_alpha_squared, 1)
end
# That's it!
end
return
end
| TTVFaster | https://github.com/bmlindor/TTVFaster.jl.git |
|
[
"MIT"
] | 0.2.1 | c3964c133326d031d4da5f539355a77aae9fe6ce | code | 3056 | include("ttv_nplanet.jl")
"""
ttv_wrapper(tt0,nplanet,ntrans,params,jmax)
Adds the mean linear ephemeris and pairwise TTVs from TTVFaster to yield model transit times.
Note: this does not currently account for skipped transits.
Arguments:
tt0: Initial times of transit for planets
nplanet: Number of planets to model
ntrans: Number of transits for each planet in model
params: Holds 5 elements of each planet: mass_ratio,period,trans0,ecosw,esinw
jmax: Maximum j over which to sum the TTV calculation for both planets
Returns:
tts: The model transit times for the observed planets with model TTVs for the system.
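Example:
An illustrative two-planet call with the Kepler-62e/f-like parameters from the examples directory:
```julia
params = [3.02306e-5, 122.386, -16.5926, -0.00127324, 0.0026446,
          1.67874e-5, 267.307, 155.466, -0.0025544, 0.00117917]
ntrans = [40, 20]
tt0 = zeros(sum(ntrans))  # placeholder; not used directly by this method
tts = ttv_wrapper(tt0, 2, ntrans, params, 5)
```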
"""
function ttv_wrapper(tt0::Vector{Float64},nplanet::Int64,ntrans::Vector{Int64},params::Vector{T},jmax::Integer) where T<:Real
# Call ttv_nplanet:
ttv = ttv_nplanet(nplanet,jmax,ntrans,params[1:5*nplanet])
# We measure transit times,not TTVs,so add back in the linear ephemeris:
tts=Float64[]
for iplanet in 1:nplanet
t0=params[(iplanet-1)*5+3]
per=params[(iplanet-1)*5+2]
n=ntrans[iplanet]
tt=collect(range(t0,stop=t0+per*(n-1),length=n))
# time1=collect(t0.+ range(0,stop=n-1,length=n) .*per)
# println(time1[1], " VS ", tt[1])
# println("For planet ",iplanet,", T0=",t0," per=", per," ntrans=",n)
tt.+=ttv[iplanet,1:n]
append!(tts,tt)
end
return tts
end
"""
ttv_wrapper(tt0,nplanet,ntrans,params,jmax,treat_PMB_as_planet)
Variant for `nplanet=2` in which planet 2 is modeled as hosting a satellite on a circular orbit.
Arguments (if different from above):
params: Holds 5 elements of each planet, plus 3 for satellite
treat_PMB_as_planet: Whether to treat the Planet-Moon Barycenter as an observed planet for p2 (i.e. for case where Earth has no Moon)
Returns:
tts: The model transit times for the observed planets with model TTVs for the system.
"""
function ttv_wrapper(tt0::Vector{Float64},nplanet::Int64,ntrans::Vector{Int64},params::Vector{T},jmax::Integer,treat_PMB_as_planet::Bool) where T<:Real
# If transit times of additional planets were observable these would need to be added in.
n1,n2 = ntrans[1:2]
# println(n1, " ", n2)
# Call ttv_nplanet:
ttv = ttv_nplanet(nplanet,jmax,ntrans,params[1:5*nplanet])
# We measure transit times,not TTVs,so add back in the linear ephemeris:
t01 = params[3]
per1 = params[2]
tt1 = collect(range(t01,stop = t01+per1*(n1-1),length = n1))
for i=1:n1
tt1[i]+= ttv[1,i]
end
t02 = params[8]
per2 = params[7]
tt2 = collect(range(t02,stop = t02+per2*(n2-1),length = n2))
# If modeling satellite in circular orbit about another planet, these lines need modification for different choices of parameters.
for i=1:n2
if treat_PMB_as_planet
tt2[i] += ttv[2,i]
else
# Compute the TTVs of planet 2, given satellite:
# Satellite needs 3 elements in params: (tsinphi0,tcosphi0,deltaphi) , where t is maximum amplitude of TTVs on planet 2
tsinphi0 = params[end-2]
tcosphi0 = params[end-1]
deltaphi = params[end]
tt2[i] += ttv[2,i] + tsinphi0*cos((i-1)*deltaphi) + tcosphi0*sin((i-1)*deltaphi)
end
end
return [tt1;tt2]
end
| TTVFaster | https://github.com/bmlindor/TTVFaster.jl.git |
|
[
"MIT"
] | 0.2.1 | c3964c133326d031d4da5f539355a77aae9fe6ce | code | 622 | using TTVFaster
using Test
using DelimitedFiles
include("../examples/test_ttv.jl")
function kepler62_test()
data=readdlm("../examples/kepler62ef_planets.txt",',',Float64)
@time ttv1,ttv2=test_ttv(5,40,20,data[1:10],false)
inner=readdlm("../examples/inner_ttv.txt")
outer=readdlm("../examples/outer_ttv.txt")
inner_ref=readdlm("inner_ttv.txt.ref")
outer_ref=readdlm("outer_ttv.txt.ref")
diffs=[maximum(abs.(inner_ref .- inner)) maximum(abs.(outer_ref .- outer))]
max_diff=maximum(diffs)
return round(max_diff)
end
@testset "TTVFaster.jl" begin
@test kepler62_test() == (0.0)
end
| TTVFaster | https://github.com/bmlindor/TTVFaster.jl.git |
|
[
"MIT"
] | 0.2.1 | c3964c133326d031d4da5f539355a77aae9fe6ce | docs | 2903 | # TTVFaster
First order eccentricity transit timing variations (TTVs) computed in Agol & Deck (2015)
[](https://ui.adsabs.harvard.edu/abs/2016ApJ...818..177A/abstract) [](http://arxiv.org/abs/1509.01623)
This implements equation (33) from that paper by computing the Laplace
coefficients using a series solution due to Jack Wisdom, computing
the $f_{1,j}^{(\pm k)}$ coefficients given in equation (34) using the functions $u$ and
$v_{\pm}$ with coefficients given in Table 1.
## Installation
You can install the registered TTVFaster repo as a Julia package with the `Pkg` manager.
- the repo from the package registry has been tested on Julia v1.6.0
```julia
julia> using Pkg; Pkg.add("TTVFaster")
```
In its current state, the package computes the TTVs of a multi-transiting planetary system where at least 2 planets are observed to be transiting.
If you intend to modify the source code for any reason, please create a GitHub fork to develop your own version.
- make sure to replace `your-GitHub-username` with your actual GitHub username in the code below
```julia
julia> Pkg.develop(PackageSpec(url="git@github.com:your-GitHub-username/TTVFaster.jl.git"))
```
## Usage
TTVFaster computes TTVs with respect to 5 properties for each planet, in the order expected by the code: $\mu$, $P$, $T_0$, $e \cos(\omega)$, $e \sin(\omega)$;
where $\mu$ is the mass ratio of the planet to the star ($m_p/M_{\star}$), $P$ is the mean orbital period, $T_0$ is the initial transit time (of the averaged orbit), $e$ is the eccentricity, and $\omega$ is the longitude of periastron.
### Examples
#### Kepler-62e/f example
The file kepler62ef_planets.txt in the examples/ directory contains
a comma-separated set of 10 parameters that describe a system with two planets similar to Kepler-62e/f.
``` julia
julia> using TTVFaster,DelimitedFiles
julia> data=readdlm("kepler62ef_planets.txt",',',Float64)
1x10 Array{Float64,2}:
3.02306e-5 122.386 -16.5926 -0.00127324 0.0026446 1.67874e-5 267.307 155.466 -0.0025544 0.00117917
julia> include("test_ttv.jl")
test_ttv (generic function with 4 methods)
julia> @time ttv1,ttv2=test_ttv(5,40,20,data); # inputs are jmax,ntrans1,ntrans2,data
0.982326 seconds (2.04 M allocations: 98.466 MiB, 12.08% gc time)
julia> @time ttv1,ttv2=test_ttv(5,40,20,data);
0.001171 seconds (331 allocations: 21.922 KiB)
```
This computes the TTVs and writes them to the files inner_ttv.txt and outer_ttv.txt in the examples/ directory.
Note that the TTVs are stored in the variables ttv1 and ttv2, as well.
The test_ttv.jl routine accepts jmax (the maximum $j$ to sum to, in this example 5),
ntrans1 (number of transits of the inner planet), ntrans2 (the number of transits of the outer planet),
and data which contains the parameters of both planets.
| TTVFaster | https://github.com/bmlindor/TTVFaster.jl.git |
|
[
"MIT"
] | 0.1.2 | 2f8f4dba5a8bf0aff3b7af5f2181d9aac0014529 | code | 1008 | using GuessworkQuantumSideInfo
using Documenter
DocMeta.setdocmeta!(GuessworkQuantumSideInfo, :DocTestSetup, :(using GuessworkQuantumSideInfo; using SparseArrays); recursive=true)
previous_GKSwstype = get(ENV, "GKSwstype", "")
ENV["GKSwstype"] = "100"
makedocs(;
modules=[GuessworkQuantumSideInfo],
authors="Eric P. Hanson",
repo="https://github.com/ericphanson/GuessworkQuantumSideInfo.jl/blob/{commit}{path}#L{line}",
sitename="GuessworkQuantumSideInfo.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://ericphanson.github.io/GuessworkQuantumSideInfo.jl",
assets=String[],
),
pages=[
"Home" => "index.md",
"Examples" => "examples.md",
"High precision example" => "high-precision-example.md",
"Mixed-integer SDP example" => "mixed-integer-SDP.md",
],
)
deploydocs(;
repo="github.com/ericphanson/GuessworkQuantumSideInfo.jl",
)
ENV["GKSwstype"] = previous_GKSwstype;
| GuessworkQuantumSideInfo | https://github.com/ericphanson/GuessworkQuantumSideInfo.jl.git |
|
[
"MIT"
] | 0.1.2 | 2f8f4dba5a8bf0aff3b7af5f2181d9aac0014529 | code | 4164 | using GuessworkQuantumSideInfo: ket, dm, BB84_states, iid_copies, randdm, guesswork,
guesswork_upper_bound, guesswork_MISDP
using StableRNGs, UnPack
using Mosek, Gurobi, Pajarito, Cbc, SCS
if !@isdefined(GRB_ENV)
const GRB_ENV = Gurobi.Env()
end
function Y_states(::Type{T}) where {T}
return [begin
θⱼ = T(j) * (T(2) * π) / T(3)
dm(cos(θⱼ) * ket(T, 1, 2) + sin(θⱼ) * ket(T, 2, 2))
end
for j in 1:3]
end
function two_random_qubits()
rng = StableRNG(746)
return [randdm(rng, Float64, 2) for _ in 1:2]
end
function three_random_qubits()
rng = StableRNG(514)
return [randdm(rng, Float64, 2) for _ in 1:3]
end
function two_random_qutrits()
rng = StableRNG(948)
return [randdm(rng, Float64, 3) for _ in 1:2]
end
function three_random_qutrits()
rng = StableRNG(219)
return [randdm(rng, Float64, 3) for _ in 1:3]
end
function iid_problem(states, n)
J = length(states)
p = ones(T, J^n) / J^n
ρBs = iid_copies(states, n)
(p=p, ρBs = ρBs)
end
T = Float64
problems = []
for n in 1:2
append!(problems, [
(iid_problem(two_random_qubits(), n)..., numeric_type=T, problem="2qubits($n)"),
(iid_problem(three_random_qubits(), n)..., numeric_type=T, problem="3qubits($n)"),
(iid_problem(Y_states(T), n)..., numeric_type=T, problem="Y($n)"),
(iid_problem(BB84_states(T), n)..., numeric_type=T, problem="BB84($n)"),
(iid_problem(two_random_qutrits(), n)..., numeric_type=T, problem="2qutrits($n)"),
(iid_problem(three_random_qutrits(), n)..., numeric_type=T, problem="3qutrits($n)"),
])
end
algos = []
for (misdp_solver, settings) in [(PajaritoSolver(cont_solver=MosekSolver(LOG=0),
mip_solver=GurobiSolver(GRB_ENV, OutputFlag=0, IntFeasTol=1e-9, FeasibilityTol=1e-8, MIPGap=0),
mip_solver_drives=false, rel_gap = 1e-5,
log_level=0), "Pajarito(Mosek, Gurobi, MSD=false)"),
(PajaritoSolver(cont_solver=MosekSolver(LOG=0),
mip_solver=GurobiSolver(GRB_ENV, OutputFlag=0, IntFeasTol=1e-9, FeasibilityTol=1e-8, MIPGap=1e-5),
mip_solver_drives=true, rel_gap = 1e-5,
log_level=0), "Pajarito(Mosek, Gurobi, MSD=true)"),
(PajaritoSolver(cont_solver=SCSSolver(verbose=false, eps=1e-6), mip_solver=CbcSolver(loglevel=0, integerT=1e-8),
mip_solver_drives=false, rel_gap = 1e-5,
log_level=0), "Pajarito(SCS, Cbc, MSD=false)")]
push!(algos,
(; f = (prob -> @timed guesswork_MISDP(prob.p, prob.ρBs, size(first(prob.ρBs),1); solver=misdp_solver, verbose=false).optval), algo = "MISDP_dB",
settings))
push!(algos,
(; f = (prob -> @timed guesswork_MISDP(prob.p, prob.ρBs, size(first(prob.ρBs), 1)^2 + 1; solver=misdp_solver, verbose=false).optval), algo = "MISDP",
settings))
push!(algos,
(; f = (prob -> @timed guesswork_MISDP(prob.p, prob.ρBs, size(first(prob.ρBs), 1)^2;
solver=misdp_solver, verbose=false).optval), algo = "MISDP (dB^2)", settings))
end
for (sdp_solver, settings) in
[((() -> MosekSolver(LOG=0)), "Mosek"), ((() -> SCSSolver(verbose=0, eps=1e-6)), "SCS")]
push!(algos,
(; f = (prob -> @timed guesswork(prob.p, prob.ρBs; solver=sdp_solver(), verbose=false).optval), algo = "SDP", settings))
push!(algos,
(; f = (prob -> @timed guesswork(prob.p, prob.ρBs; dual=true, solver=sdp_solver(), verbose=false).optval), algo = "dual_SDP", settings))
for max_time in (20, 60, 60*4)
push!(algos,
(; f = (prob -> @timed guesswork_upper_bound(prob.p, prob.ρBs; make_solver=sdp_solver, verbose=false, max_time).optval), algo = "guesswork_upper_bound(max_time=$max_time)", settings))
end
end
| GuessworkQuantumSideInfo | https://github.com/ericphanson/GuessworkQuantumSideInfo.jl.git |
|
[
"MIT"
] | 0.1.2 | 2f8f4dba5a8bf0aff3b7af5f2181d9aac0014529 | code | 466 | # https://github.com/JuliaLang/julia/issues/28679
function rm_stdout(f::Function)
nullfile = @static Sys.iswindows() ? "nul" : "/dev/null"
open(nullfile, "w") do io
redirect_stdout(f, io)
end
end
rm_stdout() do
include(joinpath(@__DIR__, "common.jl"))
end
algo_idx = parse(Int, ARGS[1])
problem_idx = parse(Int, ARGS[2])
algo = algos[algo_idx]
prob = problems[problem_idx]
optval, elapsed = algo.f(prob)
println(optval)
println(elapsed)
| GuessworkQuantumSideInfo | https://github.com/ericphanson/GuessworkQuantumSideInfo.jl.git |
|
[
"MIT"
] | 0.1.2 | 2f8f4dba5a8bf0aff3b7af5f2181d9aac0014529 | code | 6365 | using CSV, DataFrames, Statistics
using Printf
df = CSV.read(joinpath(@__DIR__, "results.csv"))
## This bit is a little annoying:
# To check for OOMs, we need to manually look through the log (see `log.md`)
# Which was numbered according to the algorithm and problem index.
# However, later two more algorithms were added (`MISDP_dB` and `MISDP (dB^2)`)
# which can mess up the numbering. So first we will filter out those to mimic
# the state of the first run, and add flags for OOMs.
include("common.jl") # need to load the list of algorithms (`algos`)
algos_pre_dB = filter(algos) do a
a.algo != "MISDP_dB" && a.algo != "MISDP (dB^2)"
end
# Looking through `log.md`, we find two issues:
# out of memory (OOM) errors during 4 solves, and unknown errors in 2 solves.
OOM = [(problem=10, algo=4), (problem=10, algo=5), (problem=10, algo=9),
(problem=10, algo=10)]
unknown_error = [(problem=12, algo=4), (problem=12, algo=9)]
# Let us record the OOM errors in the dataframe.
inds = falses(size(df, 1))
for (p_idx, a_idx) in OOM
inds .= inds .| ((df.problem .== problems[p_idx].problem) .& (df.algo .== algos_pre_dB[a_idx].algo) .& (df.settings .== algos_pre_dB[a_idx].settings))
end
df.OOM = inds
## End of problem-index specific content.
# Now we will filter out the `MISDP` algorithm (which had `dB^2 + 1` entries, which
# is 1 more than needed, and is replaced by the `MISDP (dB^2)` version.
df = filter(df) do row
row.algo != "MISDP"
end
df.timeout = isnan.(df.optval) .& (!).(df.errors)
df.error_not_solved_not_timeout = df.errors .& isnan.(df.optval) .& (!).(df.timeout)
# Now we will caculate discrepencies in `optval`
# First we will filter to rows that claim to have a correct answer
row_should_be_correct(row) = !startswith(row.algo, "guesswork_upper_bound") && !isnan(row.optval) && row.algo != "MISDP_dB"
df_right = filter(row_should_be_correct, df)
gdf_right = groupby(df_right, "problem")
# Check for BB84(1)
bb84_true_val = (big(1) / big(4)) * (10 - sqrt(big(10)))
@assert all(gdf_right[4].problem .== Ref("BB84(1)")) # check we've got the right group
@assert maximum(abs.((gdf_right[4].optval .- bb84_true_val) ./ bb84_true_val)) < 1e-7 # relative error not too big
mean_solutions_df = combine(gdf_right, :optval => mean)
mean_solutions = Dict(mean_solutions_df.problem .=> mean_solutions_df.optval_mean)
# Now that we've calculated the mean solutions, we calculate the discrepancies
df.discrepency_from_mean = [ isnan(row.optval) ? NaN : !haskey(mean_solutions, row.problem) ? missing : abs(row.optval - mean_solutions[row.problem]) for row in eachrow(df)]
df.relative_discrepency_from_mean = [ isnan(row.optval) ? NaN : !haskey(mean_solutions, row.problem) ? missing : abs(row.optval - mean_solutions[row.problem])/mean_solutions[row.problem] for row in eachrow(df)]
# Let us check the maximum discrepency is not too bad
df_right = filter(row_should_be_correct, df)
findmax(df_right.relative_discrepency_from_mean) # (1.7202621117336732e-6, 45)
# Now let us generate the final table.
## Rows in table: algorithm with settings
## Columns in table: # solved, # timeout, # errored, avg relative_discrepency_from_mean, average time
df.algo_settings = df.algo .* Ref(" ") .* df.settings
function pretty_algo(row)
if startswith(row.algo, "guesswork_upper_bound")
return "Upper bound"
end
if row.algo == "MISDP_dB"
return "MISDP (\$d_B\$)"
elseif row.algo == "MISDP"
return "MISDP (\$d_B^2 + 1\$)"
elseif row.algo == "MISDP (dB^2)"
return "MISDP (\$d_B^2\$)"
elseif row.algo == "dual_SDP"
return "SDP (dual)"
end
return row.algo
end
function pretty_settings(row)
n = length("guesswork_upper_bound")
if startswith(row.algo, "guesswork_upper_bound")
j = findfirst(==('='), row.algo)
max_time = row.algo[j+1:end-1]
return row.settings * ", \$t_\\text{max}=$(max_time)\$"
end
if row.settings == "Pajarito(Mosek, Gurobi, MSD=true)"
return "Pajarito (c1)"
elseif row.settings == "Pajarito(Mosek, Gurobi, MSD=false)"
return "Pajarito (c2)"
elseif row.settings == "Pajarito(SCS, Cbc, MSD=false)"
return "Pajarito (o)"
end
return row.settings
end
df.algo2 = [ pretty_algo(row) for row in eachrow(df) ]
df.settings2 = [ pretty_settings(row) for row in eachrow(df) ]
gdf = groupby(df, [:algo2, :settings2])
function mean_no_nan(collection)
c = [x for x in collection if !ismissing(x) && !isnan(x)]
isempty(c) ? missing : mean(c)
end
float_fmt(f) = @sprintf("%2.2f", f)
pct_fmt(f) = float_fmt(f*100) * "\\,\\%"
time_fmt(f) = float_fmt(f) * "\\,s"
table1 = combine(gdf, :relative_discrepency_from_mean => pct_fmt ∘ mean_no_nan => "average relative error", :elapsed_seconds => time_fmt ∘ mean_no_nan => "average time", :optval => (x -> sum((!isnan).(x))) => "solved", :timeout => sum => "timeouts", :error_not_solved_not_timeout => sum => "errors")
sort!(table1, [:algo2, :settings2])
table1 = rename(table1, :algo2 => :Algorithm, :settings2 => :Parameters)
df.time_status = ifelse.(df.timeout, Ref(:timeout), ifelse.(df.error_not_solved_not_timeout, Ref(:error), df.elapsed_seconds))
df2 = copy(df)
function tol_time(row)
disc = ismissing(row.relative_discrepency_from_mean) ? "(?\\,\\%)" : isnan(row.relative_discrepency_from_mean) ? "" : "("* pct_fmt(row.relative_discrepency_from_mean)*")"
time = row.time_status isa Symbol ? string(row.time_status) : time_fmt(row.elapsed_seconds)
disc == "" ? time : "$time $disc"
end
df2.value = [tol_time(row) for row in eachrow(df2) ]
df_times = unstack(df2, [:algo2, :settings2], :problem, :value)
problem_stems = unique([ str[1:end-3] for str in df2.problem])
nms = names(df_times, Not(["algo2", "settings2"]))
for stem in problem_stems
df_times[:, stem] = ["$(row[1]), $(row[2])" for row in eachrow(df_times[:, nms[findall(startswith(stem), nms)]])]
end
table2 = df_times[:, ["algo2", "settings2", problem_stems...]]
sort!(table2, [:algo2, :settings2])
table2 = rename(table2, :algo2 => :Algorithm, :settings2 => :Parameters)
using Latexify
latexify(table1, env=:table, latex=false)
latexify(table2[:,["Algorithm", "Parameters", "2qubits", "2qutrits", "Y"] ], env=:table, latex=false)
latexify(table2[:,["Algorithm", "Parameters", "3qubits", "3qutrits", "BB84"] ], env=:table, latex=false)
| GuessworkQuantumSideInfo | https://github.com/ericphanson/GuessworkQuantumSideInfo.jl.git |
|
[
"MIT"
] | 0.1.2 | 2f8f4dba5a8bf0aff3b7af5f2181d9aac0014529 | code | 2857 | using ProgressMeter, CSV
function write_results(algo, prob, optval = NaN, elapsed_seconds = NaN; warnings = false, errors = false)
row = (;algo.algo, algo.settings, prob.numeric_type, prob.problem, optval, elapsed_seconds, warnings, errors)
CSV.write(joinpath(@__DIR__, "results.csv"), [row], append = isfile(joinpath(@__DIR__, "results.csv")))
end
isfile(joinpath(@__DIR__, "results.csv")) && error("`results.csv` already exists")
include("common.jl")
# The idea is these can be run in parallel, and the thunks that write to the CSV returned,
# then those could be executed serially.
# It's probably more robust to have each process write a separate one-line CSV
# and then collect them at the end, though.
function run_problem(algo_idx, problem_idx; verbose = true)
algo = algos[algo_idx]
prob = problems[problem_idx]
out = Pipe()
err = Pipe()
proc = run(pipeline(`julia --project=$(@__DIR__) do_problem.jl $(algo_idx) $(problem_idx)`, stdout=out, stderr=err), wait=false)
time_elapsed = 0.0
if verbose
meter = Progress(timeout*2, desc = "Problem $problem_idx/$n_problems, algorithm $algo_idx/$n_algos ")
end
while time_elapsed < timeout && Base.process_running(proc)
sleep(0.5)
verbose && next!(meter)
time_elapsed += 0.5
end
verbose && finish!(meter)
if Base.process_running(proc)
verbose && println("Timed out!")
Base.kill(proc)
f = () -> write_results(algo, prob)
else
verbose && println("Finished!")
close(err.in)
close(out.in)
errors_text = String(read(err))
if !isempty(errors_text) && verbose
@error errors_text
end
results = split(chomp(String(read(out))), '\n')
warnings = false
if length(results) > 2
verbose && @warn join(results[1:end-2], "\n")
warnings = true
end
verbose && @info results
if length(results) < 2
verbose && @error "Not enough returns, something went wrong"
errors = true
optval, elapsed = NaN, NaN
else
optval, elapsed = parse.(Float64, results[end-1:end])
end
f = () -> write_results(algo, prob, optval, elapsed; errors = !isempty(errors_text), warnings)
end
return f
end
n_problems = length(problems)
n_algos = length(algos)
timeout = 60*5
for problem_idx in 1:n_problems, algo_idx in 1:n_algos
# To run just a subset of the problems, a filter can be put such as
# the following, which were used when I added algorithms after the first run.
## algos[algo_idx].algo == "MISDP_dB" || continue
## algos[algo_idx].algo == "MISDP (dB^2)" || continue
f = run_problem(algo_idx, problem_idx; verbose = true)
f()
end
df = CSV.read(joinpath(@__DIR__, "results.csv"))
| GuessworkQuantumSideInfo | https://github.com/ericphanson/GuessworkQuantumSideInfo.jl.git |
|
[
"MIT"
] | 0.1.2 | 2f8f4dba5a8bf0aff3b7af5f2181d9aac0014529 | code | 894 | module GuessworkQuantumSideInfo
using LinearAlgebra, SparseArrays # standard libraries
using Random: Random, randperm!, AbstractRNG # standard library
using Convex # Julia SDP solvers / interfaces
using Combinatorics: multiset_permutations, combinations # used for POVM outcomes
using UnPack # helper
# Basic functions for quantum states and examples
export ket, bra, ⊗, I, dm, randdm, randprobvec, iid_copies, BB84_states
include("quantum_states.jl")
# The basic SDP routine
export guesswork
include("SDP_formulation.jl")
export guesswork_lower_bound
include("lower_bound.jl")
# Re-run the SDP adding constraints one at a time
export guesswork_upper_bound
include("upper_bound.jl")
# reformulate as a mixed-integer SDP
export guesswork_MISDP
include("MISDP_formulation.jl")
# Utilities for analyzing the POVMs resulting from the SDPs
export pmfN
include("analyze_measurements.jl")
end
| GuessworkQuantumSideInfo | https://github.com/ericphanson/GuessworkQuantumSideInfo.jl.git |
|
[
"MIT"
] | 0.1.2 | 2f8f4dba5a8bf0aff3b7af5f2181d9aac0014529 | code | 4804 | """
guesswork_MISDP(
p::AbstractVector{T},
ρBs::AbstractVector{<:AbstractMatrix},
num_outcomes = size(ρBs[1], 1)^2;
solver,
c = T.(1:length(p)),
verbose::Bool = true,
) where {T<:Number} -> NamedTuple
Computes an approximation to the guesswork with `num_outcomes` possible guessing
orders for the c-q state specified by a probability vector `p`, giving the
distribution `X`, and `ρBs`, giving the associated quantum states. If
``num_outcomes ≥ d_B^2``, where `d_B` is the dimension of the Hilbert space on
which the quantum states live, then this computes exactly the guesswork. Note
that one may not supply a parameter `K` in this case; in the mixed-integer SDP
formulation, the guesser is always allowed to make `length(p)` guesses. However,
a custom cost vector `c` may be supplied to choose how to penalize each possible
number of guesses required to get the correct answer.
The size of the mixed-integer SDP solved by this function grows polynomially in
`length(p)`, the dimension `d_B`, and `num_outcomes`, but mixed-integer SDPs are
not known to be efficiently computable in general.
The keyword argument `solver` must be supplied with a solver capable of solving
mixed-integer SDPs. Currently, this means
[Pajarito.jl](https://github.com/JuliaOpt/Pajarito.jl) which solves
mixed-integer SDPs by solving an alternating sequence of mixed-integer linear
programs and SDPs. See the documentation for an example. Note that the
performance of Pajarito depends on the performance of the underlying
mixed-integer linear solver and SDP solver it is given, and that commerical
solvers often have academic licenses and can be much more performant.
The keyword argument `verbose` prints the status, optimal value, optimal
guessing orders, and POVMs, in addition to warnings from Convex.jl if the
problem is not optimally solved.
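## Example

A minimal sketch; the solver construction below mirrors the benchmark scripts in this
repository, and the particular settings are illustrative rather than required:

```julia
using Pajarito, Cbc, SCS
p = [0.5, 0.5]
ρBs = BB84_states()[1:2]   # two orthogonal qubit states
solver = PajaritoSolver(cont_solver = SCSSolver(verbose = 0, eps = 1e-6),
                        mip_solver = CbcSolver(loglevel = 0), log_level = 0)
output = guesswork_MISDP(p, ρBs; solver = solver, verbose = false)
output.optval   # ≈ 1, since the correct value can always be guessed first
```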
"""
function guesswork_MISDP(
p::AbstractVector{T},
ρBs::AbstractVector{<:AbstractMatrix},
num_outcomes = size(ρBs[1], 1)^2;
solver,
c = T.(1:length(p)),
verbose::Bool = true,
) where {T<:Number}
length(p) == length(ρBs) ||
throw(ArgumentError("Length of prior and vector of side information must match J"))
J = length(p)
dB = size(ρBs[1], 1)
all(ρB -> size(ρB) == (dB, dB), ρBs) ||
throw(ArgumentError("All side-information states must be square matrices of the same size"))
constraints = Convex.Constraint[]
Es = [Convex.ComplexVariable(dB, dB) for _ = 1:num_outcomes]
for E in Es
push!(constraints, E ⪰ 0)
end
πs = [reshape(Convex.Variable(J^2, :Bin), J, J) for _ = 1:num_outcomes]
for j = 1:num_outcomes, i = 1:J
push!(constraints, sum(πs[j][i, :]) == 1)
push!(constraints, sum(πs[j][:, i]) == 1)
end
push!(constraints, sum(Es) == I(dB))
Λs = []
for k = 1:num_outcomes
Λre = [Convex.Variable(J, J) for a = 1:dB, b = 1:dB]
Λim = [Convex.Variable(J, J) for a = 1:dB, b = 1:dB]
push!(Λs, Λre + im * Λim)
π = πs[k]
E = Es[k]
E_U = dB / 2
E_L = -1 * dB / 2
for a = 1:dB, b = 1:dB
Λab_re = Λre[a, b]
Λab_im = Λim[a, b]
push!(constraints, Λab_re >= π * E_L)
push!(constraints, Λab_re <= π * E_U)
push!(constraints, Λab_re >= real(E[a, b]) + π * E_U - E_U)
push!(constraints, Λab_re <= real(E[a, b]) + π * E_L - E_L)
push!(constraints, Λab_im >= π * E_L)
push!(constraints, Λab_im <= π * E_U)
push!(constraints, Λab_im >= imag(E[a, b]) + π * E_U - E_U)
push!(constraints, Λab_im <= imag(E[a, b]) + π * E_L - E_L)
end
end
ρBs_subnormalized = p .* ρBs
vs = [
[
real(sum(
ρBs_subnormalized[i][a, b] * Λ[b, a][j, i] for a = 1:dB for b = 1:dB
for i = 1:J
))
for j = 1:J
]
for Λ in Λs
]
objective = sum(v[j] * c[j] for j = 1:J for v in vs)
for v in vs, j = 1:J-1
push!(constraints, v[j] >= v[j+1])
end
prob = compatible_problem(Convex.minimize, objective, constraints, T)
if verbose
@info "Starting MISDP solve"
end
Convex.solve!(prob, solver; verbose = verbose)
optval = prob.optval
status = prob.status
πs_perm = [Tuple(round.(Int, (evaluate(π) * collect(1:J)))) for π in πs]
Es = evaluate.(Es)
if verbose
@info "MISDP solve"
@show status
@show optval
@show πs_perm
@show Es
end
data = (J = J, K = J, c = c, p = p, ρBs = ρBs, dB = dB)
return (
optval = optval,
Es = Es,
povm_outcomes = πs_perm,
num_outcomes = num_outcomes,
data...,
)
end
| GuessworkQuantumSideInfo | https://github.com/ericphanson/GuessworkQuantumSideInfo.jl.git |
|
[
"MIT"
] | 0.1.2 | 2f8f4dba5a8bf0aff3b7af5f2181d9aac0014529 | code | 6196 | """
N(g, x) -> Int
Given a sequence of guesses `g`, returns the number of guesses needed to
correctly guess `x`. Returns `length(g)+1` if the correct answer is never
guesses.
"""
function N(g, x)
i = findfirst(==(x), g)
if i === nothing
return length(g) + 1
else
return i
end
end
"""
cartesian_power(J, K) -> iterator
Returns an interator over the set of tuples of length `K` with elements in `1:J`.
# Example
```jldoctest
julia> collect(GuessworkQuantumSideInfo.cartesian_power(3,2))
9-element Array{Tuple{Int64,Int64},1}:
(1, 1)
(2, 1)
(3, 1)
(1, 2)
(2, 2)
(3, 2)
(1, 3)
(2, 3)
(3, 3)
```
"""
function cartesian_power(J, K)
sz = ntuple(i -> 1:J, K)
inds = CartesianIndices(sz)
return (Tuple(inds[y]) for y = 1:J^K)
end
"""
make_povm_outcomes(J, K, remove_repetition)
Returns an iterator over tuples or vectors each representing a POVM outcome. If
`remove_repetition` is true, it returns ``J!/(J-K)!`` outcomes, vectors of
length `K` with elements in ``[1,...,J]`` such that each vector has no
repetition of elements. If `remove_repetition` is false, it returns all ``J^K``
elements of ``[1, ..., J]^K``.
"""
function make_povm_outcomes(J, K, remove_repetition::Bool)
if remove_repetition
return multiset_permutations(1:J, K)
else
return cartesian_power(J, K)
end
end
"""
compatible_problem(f, objective, constraints, numeric_type) -> Convex.Problem
Returns a `Convex.Problem` object corresponding to `f`, which should be either
`maximize` or `minimize`, the objective and constraints given as arguments, and
the `numeric_type`. Since only Float64 numeric types are supported on Convex
versions 0.12 and below, the argument `numeric_type` is only passed to `f` when
it is different from `Float64`.
"""
function compatible_problem(f, objective, constraints, numeric_type)
if numeric_type != Float64
f(objective, constraints; numeric_type = numeric_type)
else
f(objective, constraints)
end
end
"""
guesswork(
p::AbstractVector{T},
ρBs::AbstractVector{<:AbstractMatrix};
solver,
K::Integer = length(p),
c = T[1:K..., 5_000],
dual::Bool = false,
remove_repetition::Bool = true,
povm_outcomes = make_povm_outcomes(length(p), K, remove_repetition),
verbose::Bool = true,
)
Computes the guesswork for the c-q state specified by a probability vector `p`,
giving the distribution `X`, and `ρBs`, giving the associated quantum states.
The keyword arguments are as follows:
* `solver` is the only required keyword argument; an SDP solver such as SCS or
MOSEK must be passed.
* `K` corresponds to the maximum number of allowed guesses. The number of
variables in the primal SDP (and the number of constraints in the dual SDP)
scales as `length(p)^K`.
* `c` may be given a custom cost vector. If `K < length(p)`, then `c` should be
of length `K+1`. The last entry, `c[K+1]`, corresponds to the cost of not
guessing the correct answer within `K` guesses.
* `dual` is a boolean variable indicating whether the primal or dual
optimization problem should be solved.
* `remove_repetition` is a boolean variable defaulting to true, indicating
whether repeated guesses of the same value should be removed; as long as `c`
is increasing, this decreases the size of the SDP without affecting the
optimal value.
* `povm_outcomes` should be an iterator (or vector) corresponding to the
possible guessing orders. This defaults to all subsets of length `K` of
`1:length(p)` without repetition.
* `verbose` is a boolean which indicates if warnings should be printed when the
problem is not solved optimally.
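## Example

A small sketch; `SCSSolver` below is just one choice of SDP solver supported by Convex.jl:

```julia
using SCS
p = [0.5, 0.5]
ρBs = [dm(ket(1, 2)), dm(ket(2, 2))]   # two orthogonal qubit states
output = guesswork(p, ρBs; solver = SCSSolver(verbose = false))
output.optval   # ≈ 1, since the correct value can always be guessed first
```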
"""
function guesswork(
p::AbstractVector{T},
ρBs::AbstractVector{<:AbstractMatrix};
solver,
K::Integer = length(p),
c = T[1:K..., 5_000],
dual::Bool = false,
remove_repetition::Bool = true,
povm_outcomes = make_povm_outcomes(length(p), K, remove_repetition),
verbose::Bool = true,
debug::Bool = false,
) where {T<:Number}
length(p) == length(ρBs) ||
throw(ArgumentError("Length of prior and vector of side information must match J"))
J = length(p)
dB = size(ρBs[1], 1)
all(ρB -> size(ρB) == (dB, dB), ρBs) ||
throw(ArgumentError("All side-information states must be square matrices of the same size"))
R(g) = sum(c[N(g, x)] * p[x] * ρBs[x] for x in eachindex(p, ρBs))
num_outcomes = length(povm_outcomes)
if dual
Y = ComplexVariable(dB, dB)
constraints = Convex.Constraint[R(outcome) ⪰ Y for outcome in povm_outcomes]
push!(constraints, Y' == Y)
objective = real(tr(Y))
problem = compatible_problem(Convex.maximize, objective, constraints, T)
solve!(problem, solver; verbose = verbose)
# introduced for debugging a problem with SCS with MathOptInterface
if debug
@show length(constraints)
for outcome in povm_outcomes
@show outcome
@show eigmin(Hermitian(R(outcome) - evaluate(Y)))
end
end
Y = evaluate(Y)
# SDP dual values not implemented in Convex.jl yet
# Es = [Matrix{ComplexF64}(problem.constraints[y].dual) for y = eachindex(povm_outcomes)]
Es = missing
else
Es = [ComplexVariable(dB, dB) for _ = 1:num_outcomes]
objective =
real(sum([tr(Es[y] * R(outcome)) for (y, outcome) in enumerate(povm_outcomes)]))
constraints = Convex.Constraint[E ⪰ 0 for E in Es]
push!(constraints, sum(Es) == I(dB))
problem = compatible_problem(Convex.minimize, objective, constraints, T)
solve!(problem, solver; verbose = verbose)
Es = evaluate.(Es)
# SDP dual values not implemented in Convex.jl yet
# Y = Matrix{ComplexF64}(problem.constraints[end].dual)
Y = missing
end
input_data = (J = J, K = K, c = c, p = p, ρBs = ρBs, dB = dB)
return (
optval = problem.optval,
status = problem.status,
Y = Y,
Es = Es,
povm_outcomes = povm_outcomes,
input_data...,
)
end
| GuessworkQuantumSideInfo | https://github.com/ericphanson/GuessworkQuantumSideInfo.jl.git |
|
[
"MIT"
] | 0.1.2 | 2f8f4dba5a8bf0aff3b7af5f2181d9aac0014529 | code | 1595 | """
pmfN(data; tol = 1e-5) -> Vector
Compute the probability mass function for the number of guesses `N`, given a
strategy. The `n`th entry of the output vector gives the probability for
guessing the correct answer on the `n`th try, for `n = 1 : K`. If the number of
allowed guesses, `K`, is smaller than `length(p)`, then there is an additional
last entry which gives the probability of never guessing the correct answer.
* `data` should be a `NamedTuple` with entries for `p`, `ρBs`, `Es`, `K`, and
`povm_outcomes`
* `tol` provides a tolerance above which to warn about imaginary
or negative probabilities.
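## Example

A small sketch reusing the output of [`guesswork`](@ref) for two orthogonal states
(the solver choice is illustrative):

```julia
using SCS
p = [0.5, 0.5]
ρBs = [dm(ket(1, 2)), dm(ket(2, 2))]
output = guesswork(p, ρBs; solver = SCSSolver(verbose = false))
pmfN(output)   # ≈ [1.0, 0.0]: the first guess is essentially always correct
```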
"""
function pmfN(data; tol = 1e-5)
@unpack p, K = data
T = eltype(p)
probs = [_prob(n, data, tol, T) for n = 1:K]
if K < length(p)
prob_no_guess = 1 - sum(probs)
if prob_no_guess < 0
prob_no_guess < -tol && @warn "Probability negative " prob_no_guess
prob_no_guess = zero(T)
end
push!(probs, prob_no_guess)
end
return probs
end
function _prob(n, data, tol, T)
@unpack Es, p, ρBs, povm_outcomes = data
prob = zero(T)
for (y, outcome) in enumerate(povm_outcomes)
for x in eachindex(p, ρBs)
if findfirst(==(x), outcome) == n
prob += p[x] * tr(Es[y] * ρBs[x])
end
end
end
if !isreal(prob)
abs(imag(prob)) > tol && @warn "Imaginary probability" n prob
end
prob = real(prob)
if prob < 0
prob < -tol && @warn "Probability negative " n prob
prob = zero(T)
end
return prob
end
| GuessworkQuantumSideInfo | https://github.com/ericphanson/GuessworkQuantumSideInfo.jl.git |
|
[
"MIT"
] | 0.1.2 | 2f8f4dba5a8bf0aff3b7af5f2181d9aac0014529 | code | 1589 |
"""
guesswork_lower_bound(
p::AbstractVector{T},
ρBs::AbstractVector{<:AbstractMatrix};
solver,
c = T[1:length(p)..., 10_000],
verbose::Bool = false,
)
See [`guesswork`](@ref) for the meaning of the arguments. Computes a lower bound
to the optimal expected number of guesses by solving a relaxed version of the
primal SDP. For `J` states, only needs `J^2` PSD variables subject to two linear
constraints.
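## Example

A short sketch with the four BB84 states (the solver choice is illustrative):

```julia
using SCS
p = ones(4) / 4
ρBs = BB84_states()
output = guesswork_lower_bound(p, ρBs; solver = SCSSolver(verbose = false))
output.optval   # a lower bound on the true guesswork (1/4)*(10 - √10) ≈ 1.709
```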
"""
function guesswork_lower_bound(
p::AbstractVector{T},
ρBs::AbstractVector{<:AbstractMatrix};
solver,
c = T[1:length(p)..., 10_000],
verbose::Bool = false,
) where {T<:Number}
length(p) == length(ρBs) ||
throw(ArgumentError("Length of prior and vector of side information must match J"))
J = length(p)
dB = size(ρBs[1], 1)
all(ρB -> size(ρB) == (dB, dB), ρBs) ||
throw(ArgumentError("All side-information states must be square matrices of the same size"))
ℰs = [ComplexVariable(dB, dB) for j = 1:J, k = 1:J]
objective = real(sum(c[j] * tr(ℰs[j, k] * p[k] * ρBs[k]) for j = 1:J for k = 1:J))
constraints = Convex.Constraint[ℰ ⪰ 0 for ℰ in ℰs] |> vec
for j = 1:J
push!(constraints, sum(ℰs[j, :]) == I(dB))
push!(constraints, sum(ℰs[:, j]) == I(dB))
end
problem = compatible_problem(Convex.minimize, objective, constraints, T)
solve!(problem, solver; verbose = verbose)
ℰs = evaluate.(ℰs)
data = (J = J, K = J, c = c, p = p, ρBs = ρBs, dB = dB)
return (optval = problem.optval, status = problem.status, ℰs = ℰs, data...)
end
| GuessworkQuantumSideInfo | https://github.com/ericphanson/GuessworkQuantumSideInfo.jl.git |
|
[
"MIT"
] | 0.1.2 | 2f8f4dba5a8bf0aff3b7af5f2181d9aac0014529 | code | 4653 | ## Notation
"""
ket([T = Float64], i::Integer, d::Integer) -> SparseVector{Complex{T}}
Create a vector representing the `i`th computational basis vector in dimension `d`.
## Example
```jldoctest
julia> ket(1,2)
2-element SparseVector{Complex{Float64},Int64} with 1 stored entry:
[1] = 1.0+0.0im
julia> collect(ans)
2-element Array{Complex{Float64},1}:
1.0 + 0.0im
0.0 + 0.0im
```
"""
ket(::Type{T}, i::Integer, d::Integer) where {T} = sparsevec([i], [one(complex(T))], d)
"""
bra([T = Float64], i::Integer, d::Integer) -> SparseVector{Complex{T}}'
Create a dual vector representing the bra associated to `i`th computational basis vector in dimension `d`.
## Example
```jldoctest
julia> bra(1,2)
1×2 LinearAlgebra.Adjoint{Complex{Float64},SparseVector{Complex{Float64},Int64}}:
1.0-0.0im 0.0-0.0im
julia> collect(ans)
1×2 Array{Complex{Float64},2}:
1.0-0.0im 0.0-0.0im
```
"""
bra(::Type{T}, i::Integer, d::Integer) where {T} = ket(T, i, d)'
ket(i::Integer, d::Integer) = ket(Float64, i, d)
bra(i::Integer, d::Integer) = bra(Float64, i, d)
"""
dm(ψ::AbstractVector)
Creates the density matrix version of a pure state `ψ` via the outer product.
"""
dm(ψ::AbstractVector) = ψ' ⊗ ψ
if VERSION < v"1.2-pre"
(I::UniformScaling)(n::Integer) = Diagonal(fill(I.λ, n))
end
const ⊗ = kron
"""
BB84_states([T::Type = Float64])
Generates the BB84 states `|0⟩`, `|1⟩`, `|-⟩`, and `|+⟩`, for use in [`guesswork`](@ref) or other functions. The
numeric type can be optionally specified by the argument.
"""
function BB84_states end
BB84_states() = BB84_states(Float64)
function BB84_states(::Type{T}) where {T<:Number}
ketplus = (ket(T, 1, 2) + ket(T, 2, 2)) / sqrt(T(2))
ketminus = (ket(T, 1, 2) - ket(T, 2, 2)) / sqrt(T(2))
ketzero = ket(T, 1, 2)
ketone = ket(T, 2, 2)
return dm.([ketzero, ketone, ketminus, ketplus])
end
"""
iid_copies(ρBs::AbstractVector{<:AbstractMatrix}, n::Integer) -> Vector{Matrix}
Create a vector of all states of the form ``ρ_1 \\otimes \\dotsm \\otimes ρ_n``
where the ``ρ_i`` range over the set `ρBs`.
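## Example

Two copies of the four BB84 states give ``4^2 = 16`` product states:

```jldoctest
julia> length(iid_copies(BB84_states(), 2))
16
```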
"""
function iid_copies(ρBs::AbstractVector{<:AbstractMatrix}, n::Integer)
inds = cartesian_power(length(ρBs), n)
[foldl(⊗, ρBs[collect(I)]) for I in inds]
end
## Random states
"""
simplexpt(unif)
Takes a vector of length `d-1` of numbers between `0.0` and `1.0` and converts it to a point on the standard `d`-dimensional simplex.
"""
function simplexpt(unif)
d = length(unif) + 1
T = eltype(unif)
w = zeros(T, d + 1)
w[2:d] .= sort(unif)
w[d+1] = one(T)
diff(w)
end
"""
randprobvec([rng, T=Float64], d)
Generates a point of type `T`, uniformly at random on the standard `d-1`-dimensional simplex using an algorithm by [Smith and Tromble](http://www.cs.cmu.edu/~nasmith/papers/smith+tromble.tr04.pdf).
## Example
```julia
julia> randprobvec(3)
3-element Array{Float64,1}:
0.24815974900033688
0.17199716455672287
0.5798430864429402
```
"""
randprobvec(rng::AbstractRNG, ::Type{T}, d::Integer) where T = simplexpt(rand(rng, T, d - 1))
randprobvec(::Type{T}, d::Integer) where T = randprobvec(Random.GLOBAL_RNG, T, d)
randprobvec(d::Integer) = randprobvec(Float64, d)
"""
randunitary([rng, T=Float64], d)
Generates a unitary matrix of dimension `d` at random according to the Haar measure, using an algorithm described by Maris Ozols in ["How to generate a random unitary matrix"](http://home.lu.lv/~sd20008/papers/essays/Random%20unitary%20%5Bpaper%5D.pdf).
"""
function randunitary(rng::AbstractRNG, ::Type{T}, d::Integer) where T
# `randn` for `BigFloat`'s isn't supported yet
# (https://github.com/JuliaLang/julia/issues/17629)
# so we convert afterwards instead (losing some randomness).
rg1 = T.(randn(rng, d, d))
rg2 = T.(randn(rng, d, d))
RG = rg1 + im * rg2
Q, R = qr(RG)
r = diag(R)
L = diagm(0 => r ./ abs.(r))
return Q * L
end
randunitary(::Type{T}, d::Integer) where {T} = randunitary(Random.GLOBAL_RNG, T, d)
randunitary(d::Integer) = randunitary(Float64, d)
"""
randdm([rng, T = Float64], d)
Generates a density matrix with numeric type `Complex{T}`, of dimension `d` at
random.
## Example
```julia
julia> randdm(2)
2×2 Array{Complex{Float64},2}:
0.477118+0.0im 0.119848-0.0371569im
0.119848+0.0371569im 0.522882+0.0im
```
"""
function randdm(rng::AbstractRNG, ::Type{T}, d::Integer) where T
eigs = diagm(0 => randprobvec(rng, T, d))
U = randunitary(rng, T, d)
ρ = U * eigs * (U')
return Matrix((ρ + ρ') / 2)
end
randdm(::Type{T}, d::Integer) where T = randdm(Random.GLOBAL_RNG, T, d)
randdm(d) = randdm(Float64, d)
| GuessworkQuantumSideInfo | https://github.com/ericphanson/GuessworkQuantumSideInfo.jl.git |
|
[
"MIT"
] | 0.1.2 | 2f8f4dba5a8bf0aff3b7af5f2181d9aac0014529 | code | 11905 | """
guesswork_upper_bound(
p::AbstractVector{T},
ρBs::AbstractVector{<:AbstractMatrix};
make_solver,
c::AbstractVector = T.(1:length(p)),
max_retries = 50,
max_time = Inf,
num_constraints = Inf,
verbose::Bool = false,
num_steps_per_SA_run::Integer = length(p)^2 * 500,
) where {T<:Number} -> NamedTuple
Computes an upper bound to the guesswork problem associated to the c-q state
specified by `p` and `ρBs`, as in [`guesswork`](@ref). A custom cost vector `c`
may be optionally passed. If the keyword argument `verbose` is set to true,
information is printed about each iteration of the algorithm.
The keyword argument `make_solver` is required, and must pass a *function that
creates a solver instances*. For example, instead of passing `SCSSolver()`, pass
`() -> SCSSolver()`. This is needed because the algorithm used in
`guesswork_upper_bound` solves a sequence of SDPs, not just one.
The algorithm has three termination criteria which are controlled by keyword
arguments. The algorithm stops when any of the following occur:
* `max_retries` simulated annealing attempts fail to find a violated constraint.
* `num_constraints` constraints have been added to the dual SDP
* The total runtime of the algorithm is projected to exceed `max_time` on the next iteration.
By default, `max_retries` is set to 50, while `num_constraints` and `max_time`
are set to infinity.
Lastly, the keyword argument `num_steps_per_SA_run` controls the runtime of the
simulated annealing algorithm. Increase `num_steps_per_SA_run` to search longer
for a violated constraint within a given simulated annealing run.
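## Example

A short sketch with the four BB84 states; the `make_solver` closure and the
`max_time` budget below are illustrative:

```julia
using SCS
p = ones(4) / 4
ρBs = BB84_states()
output = guesswork_upper_bound(p, ρBs; make_solver = () -> SCSSolver(verbose = false), max_time = 20)
output.optval   # an upper bound on the true guesswork (1/4)*(10 - √10) ≈ 1.709
```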
"""
function guesswork_upper_bound(
p::AbstractVector{T},
ρBs::AbstractVector{<:AbstractMatrix};
make_solver,
c::AbstractVector = T.(1:length(p)),
max_retries = 50,
max_time = Inf,
num_constraints = Inf,
verbose::Bool = false,
num_steps_per_SA_run::Integer = length(p)^2 * 500,
mutate! = rand_rev!,
debug = false,
) where {T<:Number}
length(p) == length(ρBs) ||
throw(ArgumentError("Length of prior and vector of side information must match J"))
J = length(p)
dB = size(ρBs[1], 1)
all(ρB -> size(ρB) == (dB, dB), ρBs) ||
throw(ArgumentError("All side-information states must be square matrices of the same size"))
if max_retries == Inf && max_time == Inf && num_constraints == Inf
throw(ArgumentError("All three termination criteria (`max_retries`, `max_time`, and `num_constraints`) are infinite; algorithm would never terminate."))
end
if (length(c) < length(p))
throw(ArgumentError("Need `length(c) >= length(p)`."))
end
constraints::Vector{Vector{Int}} = []
convex_problem, convex_Y, convex_R = make_problem(p, ρBs, c, dB, constraints, T)
solve_time::Float64 = 0.0
convex_Y.value = Matrix{Complex{T}}(maximum(c)*I, dB, dB)
upper_bound::T = 0.0
# We optimize over the inverse of the permutations we are interested in
# that way we don't need to take the inverse until the end.
Rg_inv! = let ρBs_subnormalized = [p[x] * ρBs[x] for x = 1:J], c = c
(R, g_inv) -> begin
for x = 1:J
R .+= ρBs_subnormalized[x] .* c[g_inv[x]]
end
R
end
end
# Initialize some variables
π = collect(1:J)
scratch_π = copy(π)
R_scratch::Matrix{Complex{T}} = zeros(Complex{T}, dB, dB)
total_time_so_far::Float64 = 0.0
# run the main loop
# verbose is passed as a "value type" so logging statements can be compiled out when it is set to false.
upper_bound = _loop(
upper_bound,
R_scratch,
constraints,
π,
scratch_π,
convex_R,
convex_problem,
convex_Y,
Rg_inv!,
num_steps_per_SA_run,
total_time_so_far,
max_retries,
max_time,
num_constraints,
mutate!,
make_solver,
T,
Val(verbose),
)
input_data = (J = J, K = J, c = c, p = p, ρBs = ρBs, dB = dB)
return (
optval = upper_bound,
status = convex_problem.status,
Y = evaluate(convex_Y),
povm_outcomes = invperm.(constraints),
input_data...,
)
end
# We put the inner loop behind a function barrier
function _loop(
upper_bound::T,
R_scratch,
constraints,
π,
scratch_π,
convex_R,
convex_problem,
convex_Y,
Rg_inv!,
num_steps_per_SA_run,
total_time_so_far,
max_retries,
max_time,
num_constraints,
mutate!,
make_solver,
::Type{T},
::Val{verbose},
) where {T,verbose}
while length(constraints) < num_constraints
f = let Y::Matrix{Complex{T}} = evaluate(convex_Y)
π -> begin
R_scratch .= -1 .* Y
eigmin(Hermitian(Rg_inv!(R_scratch, π)))::T
end
end
# Choose `fval` as something `> 0` to enter the loop.
fval::T = 1.0
n_tries::Int = 0
SA_time = @elapsed while fval > 0 && n_tries < max_retries
n_tries += 1
# initialize `π` to hold a random permutation, then use the simulated annealing algorithm to try to minimize `f`, starting from this permutation. Repeat until we find a violated constraint, or we've tried enough times.
randperm!(π)
fval = SA!(π, scratch_π, f, mutate!, num_steps_per_SA_run)
end
total_time_so_far += SA_time
if fval < 0
# add the permutation to the list of constraints and resolve the problem.
new_constraint = copy(π)
push!(constraints, new_constraint)
update_problem!(convex_problem, convex_Y, new_constraint, convex_R)
solve_time = @elapsed solve!(convex_problem, make_solver(); verbose = verbose)
upper_bound::T = convex_problem.optval
if verbose
_log(
length(constraints),
fval,
upper_bound,
SA_time,
solve_time,
total_time_so_far,
)
end
else
# We've reached `max_retries` attempts to find a violated constraint.
verbose && _log_retries(max_retries)
break
end
# if we do another loop, it will take a round of SA plus a solve, so let's stop if
# that will exceed the max time.
projected_time = total_time_so_far + 1.1*(SA_time + solve_time)
if projected_time > max_time
if verbose
_log_time(total_time_so_far, projected_time, max_time)
end
break
end
end
return upper_bound
end
@noinline function _log(
lconstraints,
violation,
upper_bound,
SA_time,
solve_time,
total_time_so_far,
)
@info "Adding constraint $(lconstraints) " violation upper_bound SA_time solve_time total_time_so_far
end
@noinline function _log_retries(max_retries)
@info "Unable to find violated constraint after repeating ($max_retries) simulated annealing runs. Change `max_retries` to adjust this behavior."
end
@noinline function _log_time(total_time_so_far, projected_time, max_time)
if total_time_so_far < max_time
@info "Next iteration is projected to exceed maximum time $(max_time); terminating here. Change `max_time` to adjust this behavior." total_time_so_far projected_time
else
@info "Reached maximum time $(max_time); terminating here. Change `max_time` to adjust this behavior." total_time_so_far
end
end
function make_problem(p, ρBs, c, dB, constraints, ::Type{T}) where {T}
R = g -> sum(c[N(invperm(g), x)] * p[x] * ρBs[x] for x in eachindex(p, ρBs))
    Y = ComplexVariable(dB, dB)
    povm_outcomes = constraints
constraints = Convex.Constraint[R(outcome) ⪰ Y for outcome in povm_outcomes]
push!(constraints, Y' == Y)
objective = real(tr(Y))
return compatible_problem(Convex.maximize, objective, constraints, T), Y, R
end
function update_problem!(problem, Y, new_constraint, R)
push!(problem.constraints, R(new_constraint) ⪰ Y)
end
function get_dual_and_val(
problem,
Y,
make_solver,
::Val{dB},
::Val{verbose},
::Type{T},
) where {dB,T,verbose}
solve!(problem, make_solver(); verbose = verbose)
return problem.optval::T, SMatrix{dB,dB,Complex{T}}(evaluate(Y))
end
"""
    SA!(π, scratch_π, f, mutate!, num_steps)
Try to minimize `f` over permutations of 1:`length(π)` via a simulated annealing algorithm, returning the value of `f` on the final choice of `π`. Modifies `π` to hold the optimal permutation at the end of the search, and uses `scratch_π` to hold proposal permutations. The optional argument `mutate!` should be a function
mutate!(π_to_mutate, scratch_π, f, J) -> nothing
which modifies `π_to_mutate` in place, can use `scratch_π` to hold temporary permutations, and has access to `f` and `J`, the length of `π_to_mutate`. By default, `rand_rev!` is chosen, which is very fast and requires no evaluations of `f`. Other choices of `mutate!` include `swap_all!` and `two_opt!` (uses ~ `n^2` evaluations of `f`), or a random choice at each step, via e.g.
mutate! = (π_to_mutate, scratch_π, f, J) -> begin
if rand() < .01
two_opt!(π_to_mutate, scratch_π, f, J)
else
rand_rev!(π_to_mutate, scratch_π, f, J)
end
end
"""
function SA!(π, scratch_π, f, mutate!, num_steps)
J = length(π)
init_temp = exp(8)
final_temp = exp(-6.5)
cool_rate = (final_temp / init_temp)^(1 / (num_steps - 1))
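    # geometric cooling schedule: the temperature decays from `init_temp` to `final_temp` over `num_steps` steps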
# divide by cool_rate so when we first multiply we get init_temp
temp = init_temp / cool_rate
proposed_π = scratch_π
current_cost = f(π)
for _ = 1:num_steps
temp *= cool_rate
mutate!(proposed_π, π, f, J)
proposed_cost = f(proposed_π)
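        # Metropolis rule: always accept improvements; accept a worse proposal with probability exp(-Δcost / temp)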
@fastmath accept = proposed_cost < current_cost ? true :
rand() < exp((current_cost - proposed_cost) / temp)
if accept
current_cost = proposed_cost
π .= proposed_π
else
proposed_π .= π
end
end
    # finish with a single `two_opt!` pass (trying all section reversals) to polish the final permutation
fval = two_opt!(π, scratch_π, f, J)
return fval
end
"""
rand_rev!(π, scratch_π = nothing, f = nothing, n = length(π))
Modifies `π` by randomly reversing a section of the permutation. Has the arguments `scratch_π` and `f` so it can be used in `SA!`.
"""
@inline function rand_rev!(π, scratch_π = nothing, f = nothing, n = length(π))
i, j = rand(1:n), rand(1:n)
if i > j
i, j = j, i
end
reverse!(π, i, j)
end
"""
swap_all!(π, scratch_π, f, n = length(π))
Modifies `π` in place to find a lower value of `f` by swapping each of the `n*(n-1)/2` pairs of entries and choosing the optimal one.
"""
function swap_all!(π, scratch_π, f, n = length(π))
@inline swap!(π, i, j) = π[i], π[j] = π[j], π[i]
_mutate_two!(π, scratch_π, f, swap!, n)
end
"""
two_opt!(π, scratch_π, f, n = length(π))
Modifies `π` in place to find a lower value of `f` by reversing each possible ordered section of `π` by trying all `n*(n-1)/2` possible pairs of endpoints and choosing the optimal one.
"""
function two_opt!(π, scratch_π, f, n = length(π))
_mutate_two!(π, scratch_π, f, reverse!, n)
end
@inline function _mutate_two!(π, scratch_π, f, mut!, n)
best_fval_so_far = Inf
best_π_so_far = scratch_π
for (i, j) in combinations(1:n, 2)
mut!(π, i, j)
fval = f(π)
if fval < best_fval_so_far
best_fval_so_far = fval
best_π_so_far .= π
end
mut!(π, i, j)
end
π .= best_π_so_far
return best_fval_so_far
end
| GuessworkQuantumSideInfo | https://github.com/ericphanson/GuessworkQuantumSideInfo.jl.git |
|
[
"MIT"
] | 0.1.2 | 2f8f4dba5a8bf0aff3b7af5f2181d9aac0014529 | code | 579 | using Test, GuessworkQuantumSideInfo, LinearAlgebra, Statistics, Random, UnPack, StableRNGs
using GenericLinearAlgebra
using SCS
TEST_MATLAB = false # requires MATLAB and MATLAB.jl installed
TEST_MISDP = false # requires Pajarito or another MISDP solver
TEST_BB84_MISDP = false # takes ~100 seconds; requires TEST_MISDP
TEST_MOI = true # incompatible with the current version of Pajarito
default_sdp_solver() = TEST_MOI ? SCS.Optimizer(verbose = 0, eps = 1e-6) : SCSSolver(verbose = 0, eps = 1e-6)
if TEST_MATLAB
include("test_matlab.jl")
end
include("test_problems.jl")
| GuessworkQuantumSideInfo | https://github.com/ericphanson/GuessworkQuantumSideInfo.jl.git |
|
[
"MIT"
] | 0.1.2 | 2f8f4dba5a8bf0aff3b7af5f2181d9aac0014529 | code | 3482 | # Earlier versions of this code were tested, but I haven't been able to test the
# latest version due to licensing issues.
using MATLAB
# Monkeypatch the MATLAB versions into the `GuessworkQuantumSideInfo` module
@eval GuessworkQuantumSideInfo begin
@doc raw"""
makeR(data)
Compute a three-dimensional array `R`, such that the third index ranges from
`y=1:J^k`, and `R[:,:,y]` is described by
``
\sum_{x=1}^J p(x) \sum_{n\in L} c_n μ(n,x,\vec y) ρ_B^x
``
"""
function makeR(data; do_transpose::Bool)
@unpack J, K, p, ρBs, c, dB, povm_outcomes = data
d_out = length(povm_outcomes)
R_function(g) = sum(c[N(g, x)] * p[x] * ρBs[x] for x in eachindex(p, ρBs))
R = Array{ComplexF64}(undef, (dB, dB, d_out))
if do_transpose
for (y, outcome) in enumerate(povm_outcomes)
R[:, :, y] .= transpose(R_function(outcome))
end
else
for (y, outcome) in enumerate(povm_outcomes)
R[:, :, y] .= R_function(outcome)
end
end
R
end
function guesswork_MATLAB(
p,
ρBs;
K = length(p),
c = [1:K..., 10_000],
dual::Bool = false,
remove_repetition::Bool = true,
povm_outcomes = make_povm_outcomes(length(p), K, remove_repetition),
)
length(p) == length(ρBs) ||
throw(ArgumentError("Length of prior and vector of side information must match J"))
J = length(p)
dB = size(ρBs[1], 1)
all(ρB -> size(ρB) == (dB, dB), ρBs) ||
throw(ArgumentError("All side-information states must be square matrices of the same size"))
d_out = length(povm_outcomes)
data = (
J = J,
K = K,
c = c,
p = p,
ρBs = ρBs,
dB = dB,
d_out = d_out,
povm_outcomes = povm_outcomes,
)
if dual
R = makeR(data; do_transpose = false)
mat"""
dB = double($(dB));
d_out = double($(d_out));
R = $(R);
cvx_begin sdp quiet
variable Y(dB, dB) hermitian
dual variable E
maximize(trace(Y))
subject to
E : R >= repmat(Y, [1, 1, d_out])
cvx_end
$(Y) = Y;
$(E) = E;
$(cvx_optval) = cvx_optval;
$(cvx_status) = cvx_status;
"""
else
R = makeR(data; do_transpose = true)
mat"""
dB = double($(dB));
d_out = double($(d_out));
R = $(R);
cvx_begin sdp quiet
variable E(dB, dB, d_out) hermitian semidefinite
dual variable Y
minimize(real( sum( E(:) .* R(:) )))
subject to
Y : sum(E, 3) == eye(dB)
cvx_end
$(E) = E;
$(Y) = Y;
$(cvx_optval) = cvx_optval;
$(cvx_status) = cvx_status;
"""
end
status = Symbol(cvx_status)
if status != :Solved
@error "CVX did not successfully solve the problem" status
end
Es = [E[:, :, j] for j = 1:d_out]
return (
optval = cvx_optval,
status = status,
Y = Y,
Es = Es,
povm_outcomes = Tuple.(povm_outcomes),
data...,
)
end
end
| GuessworkQuantumSideInfo | https://github.com/ericphanson/GuessworkQuantumSideInfo.jl.git |
|
[
"MIT"
] | 0.1.2 | 2f8f4dba5a8bf0aff3b7af5f2181d9aac0014529 | code | 9096 |
# bad solve error on 1.0 and nightly observed on CI (though not locally)
# possibly reflects problems with SCS, but not with this package,
# so we'll just use a relaxed tolerance here.
TOL = 1e-2
@info "Beginning tests with" TEST_MATLAB TEST_MISDP TEST_BB84_MISDP TEST_MOI TOL
include("test_utilities.jl")
dB = 2
ketplus = (ket(1, dB) + ket(2, dB)) / sqrt(2)
ketminus = (ket(1, dB) - ket(2, dB)) / sqrt(2)
ketzero = ket(1, dB)
ketone = ket(2, dB)
Random.seed!(5)
@testset "GuessworkQuantumSideInfo.jl" begin
@testset "Uninformative side information" begin
ρBs = dm.([ketzero, ketzero])
p = [0.5, 0.5]
output = test_optimize(p, ρBs, 1.5)
pmf = pmfN(output)
@test pmf ≈ [0.5, 0.5] rtol = TOL
lb = guesswork_lower_bound(p, ρBs; solver = default_sdp_solver()).optval
@test lb <= output.optval + TOL
ub = guesswork_upper_bound(p, ρBs; num_constraints = 2^2 + 1, make_solver = default_sdp_solver).optval
@test output.optval <= ub + TOL
end
@testset "Orthogonal side information" begin
ρBs = dm.([ketzero, ketone])
p = [0.5, 0.5]
output = test_optimize(p, ρBs, 1.0)
pmf = pmfN(output)
@test pmf ≈ [1.0, 0.0] rtol = TOL
lb = guesswork_lower_bound(p, ρBs; solver = default_sdp_solver()).optval
@test lb <= output.optval + TOL
ub = guesswork_upper_bound(p, ρBs; num_constraints = 2^2 + 1, make_solver = default_sdp_solver).optval
@test output.optval <= ub + TOL
end
@testset "Plus zero" begin
ρBs = dm.([ketplus, ketzero])
p = [0.5, 0.5]
output = test_optimize(p, ρBs, cos(π / 8)^2 + 2 * sin(π / 8)^2)
@test pmfN(output) ≈ [cos(π / 8)^2, sin(π / 8)^2] rtol = TOL
lb = guesswork_lower_bound(p, ρBs; solver = default_sdp_solver()).optval
@test lb <= output.optval + TOL
ub = guesswork_upper_bound(p, ρBs; num_constraints = 2^2 + 1, make_solver = default_sdp_solver).optval
@test output.optval <= ub + TOL
end
@testset "Three random qubits" begin
ρBs = [randdm(2) for _ = 1:3]
p = ones(3) / 3
@testset "Basic tests" begin
output = test_optimize(p, ρBs; test_MISDP = false)
lb = guesswork_lower_bound(p, ρBs; solver = default_sdp_solver()).optval
@test lb <= output.optval + TOL
ub = guesswork_upper_bound(p, ρBs; num_constraints = 2^2 + 1, make_solver = default_sdp_solver).optval
@test output.optval <= ub + TOL
end
@testset "Custom cost vector" begin
c = cumsum(10*rand(3)) # increasing vector
output_c = test_optimize(p, ρBs; test_MISDP = false, c = c)
lb_c = guesswork_lower_bound(p, ρBs; solver = default_sdp_solver(), c = c).optval
@test lb_c <= output_c.optval + TOL
ub_c = guesswork_upper_bound(p, ρBs; num_constraints = 2^2 + 1, make_solver = default_sdp_solver, c = c).optval
@test output_c.optval <= ub_c + TOL
end
end
@testset "Three random qutrits" begin
ρBs = [randdm(3) for _ = 1:3]
p = ones(3) / 3
output = test_optimize(p, ρBs; test_MISDP = false)
lb = guesswork_lower_bound(p, ρBs; solver = default_sdp_solver()).optval
@test lb <= output.optval + TOL
ub = guesswork_upper_bound(p, ρBs; num_constraints = 3^2 + 1, make_solver = default_sdp_solver).optval
@test output.optval <= ub + TOL
end
@testset "Four random qubits" begin
ρBs = [randdm(2) for _ = 1:4]
p = [0.25, 0.25, 0.25, 0.25]
@testset "Basic tests" begin
output = test_optimize(p, ρBs; test_MISDP = false)
lb = guesswork_lower_bound(p, ρBs; solver = default_sdp_solver()).optval
@test lb <= output.optval + TOL
ub = guesswork_upper_bound(p, ρBs; num_constraints = 2^2 + 1, make_solver = default_sdp_solver).optval
@test output.optval <= ub + TOL
end
@testset "K=2 guesses" begin
output_K = test_optimize(p, ρBs; test_MISDP = false, K = 2)
pmf = pmfN(output_K)
@test length(pmf) == 3
@test sum(pmf) ≈ 1.0 atol = TOL
@test all(x -> x >= -TOL, pmf)
end
end
@testset "BB84" begin
ρBs = BB84_states()
p = ones(4) / 4
output = guesswork(p, ρBs; solver = default_sdp_solver())
testPOVM(output.Es)
if TEST_BB84_MISDP && TEST_MISDP
@info "Starting BB84 MISDP"
output_MISDP, t, _ = @timed guesswork_MISDP(p, ρBs, 2; solver = misdp_solver())
@info "Finished BB84 MISDP in $(round(t;digits=3)) seconds."
testPOVM(output_MISDP.Es)
@test output_MISDP.optval ≈ output.optval rtol = TOL
end
relaxed_output = guesswork_upper_bound(p, ρBs; num_constraints = 4, make_solver = default_sdp_solver)
@test output.optval ≈ relaxed_output.optval rtol = TOL
lb = guesswork_lower_bound(p, ρBs; solver = default_sdp_solver()).optval
@test lb <= output.optval + TOL
end
@testset "BB84 variant" begin
ρBs =
dm.([
ketzero,
ketone,
(ketzero + im * ketone) / sqrt(2),
(ketzero - im * ketone) / sqrt(2),
])
J = length(ρBs)
p = ones(J) / J
output = test_optimize(p, ρBs; test_MISDP = false)
lb = guesswork_lower_bound(p, ρBs; solver = default_sdp_solver()).optval
@test lb <= output.optval + TOL
ub = guesswork_upper_bound(p, ρBs; num_constraints = 2^2 + 1, make_solver = default_sdp_solver).optval
@test output.optval <= ub + TOL
end
@testset "Errors and logging" begin
ρBs = BB84_states()
J = length(ρBs)
p = ones(J) / J
@test_throws ArgumentError guesswork_upper_bound(p, ρBs; num_constraints = Inf, max_time=Inf, max_retries = Inf, make_solver = default_sdp_solver)
@test_logs (:info, r"Adding constraint") match_mode=:any guesswork_upper_bound(p, ρBs; verbose=true, make_solver = default_sdp_solver, max_time = 2)
two_states = [randdm(2) for _ = 1:2]
p_3 = randprobvec(3)
if TEST_MISDP
@test_logs (:info, r"MISDP solve") match_mode=:any guesswork_MISDP(randprobvec(2), [randdm(2) for _ = 1:2], 5; verbose=true, solver = misdp_solver())
@test_throws ArgumentError guesswork_MISDP(p_3, two_states, 5; solver = misdp_solver())
end
@test_throws ArgumentError guesswork(p_3, two_states; solver = default_sdp_solver())
@test_throws ArgumentError guesswork_lower_bound(p_3, two_states; solver = default_sdp_solver())
@test_throws ArgumentError guesswork_upper_bound(p_3, two_states; make_solver = default_sdp_solver)
end
@testset "Concavity of the guesswork" begin
J = 3
dB = 2
p_1 = randprobvec(J)
ρBs_1 = [randdm(dB) for _ = 1:J]
p_2 = randprobvec(J)
ρBs_2 = [randdm(dB) for _ = 1:J]
λ = rand()
p = λ .* p_1 + (1-λ) .* p_2
ρBs = λ .* ρBs_1 + (1-λ) .* ρBs_2
g_avg = guesswork(p, ρBs; solver = default_sdp_solver()).optval
g_1 = guesswork(p_1, ρBs_1; solver = default_sdp_solver()).optval
g_2 = guesswork(p_2, ρBs_2; solver = default_sdp_solver()).optval
@test TOL + g_avg >= λ*g_1 + (1-λ)*g_2
end
@testset "Quantum states" begin
for rng in (tuple(), tuple(StableRNG(123))) # test with custom RNG
for T in (BigFloat, Float64)
for d in (2,3)
ρ = randdm(rng..., T, d)
@test eltype(ρ) == Complex{T}
@test tr(ρ) ≈ one(T) rtol=1e-8
@test all( x-> x >= -1e-8, eigvals(ρ))
U = GuessworkQuantumSideInfo.randunitary(rng..., T, d)
@test eltype(U) == Complex{T}
@test U' * U ≈ I(d) atol=1e-6
@test U * U' ≈ I(d) atol=1e-6
k = ket(T, 1, d)
@test k' ≈ bra(T, 1, d) rtol=1e-8
p = randprobvec(rng..., T, d)
@test eltype(p) == T
@test sum(p) ≈ one(T) rtol=1e-8
@test all( x-> x >= -1e-8, p)
end
ρBs = BB84_states(T)
@test length(ρBs) == 4
@test Set(iid_copies(ρBs, 2)) == Set([ ρ ⊗ σ for ρ in ρBs for σ in ρBs])
end
end
# Defaults
@test BB84_states() ≈ BB84_states(Float64)
@test BB84_states() ≈ dm.([ketzero, ketone, ketminus, ketplus]) atol=1e-6
@test ket(1,2) ≈ ket(Float64, 1, 2)
@test bra(1, 2) ≈ bra(Float64, 1, 2)
@test eltype(randdm(2)) == Complex{Float64}
@test eltype(GuessworkQuantumSideInfo.randunitary(2)) == Complex{Float64}
@test eltype(randprobvec(2)) == Float64
end
end
| GuessworkQuantumSideInfo | https://github.com/ericphanson/GuessworkQuantumSideInfo.jl.git |
|
[
"MIT"
] | 0.1.2 | 2f8f4dba5a8bf0aff3b7af5f2181d9aac0014529 | code | 3889 | Base.isapprox(x) = Base.Fix2(isapprox, x)
function testPOVM(Es; tol = TOL)
dB = size(first(Es), 1)
@test sum(Es) ≈ complex(1.0) * I(dB) atol = tol
for E in Es
@test E ≈ E' atol = tol
end
@test all(isposdef, Hermitian(E) + tol * I(dB) for E in Es)
end
function test_misdp_objective_value(data)
@unpack povm_outcomes, ρBs, p, Es = data
c = collect(1:length(povm_outcomes[1]))
tot_cost = 0.0
ρBs_tilde = p .* ρBs
for y in eachindex(Es)
E = Es[y]
v = [tr(ρBs_tilde[x] * E) for x in eachindex(ρBs_tilde)]
perm = povm_outcomes[y] |> collect
cost = dot(c[invperm(perm)], v)
tot_cost += cost
end
@test tot_cost ≈ data.optval rtol = TOL
end
get_SCS_solver() = TEST_MOI ? SCS.Optimizer(verbose = 0, eps = 1e-6) : SCSSolver(verbose = 0, eps = 1e-6)
function test_optimize(
p,
ρBs,
true_opt_val = nothing;
test_repetition = false,
test_MISDP = true,
kwargs...
)
optvals = []
test_data = []
local current_output
for dual in (true, false)
for remove_repetition in (test_repetition ? (false, true) : (true,))
for (solver, solver_name) in ((get_SCS_solver(), :SCS),)
current_output = guesswork(p, ρBs; solver = solver, dual = dual, remove_repetition = remove_repetition, kwargs...)
push!(optvals, current_output.optval)
push!(
test_data,
(
opval = current_output.optval,
solver = solver_name,
params = (dual = dual, remove_repetition = remove_repetition, kwargs...),
),
)
end
if TEST_MATLAB
# `GuessworkQuantumSideInfo.guesswork_MATLAB` only exists if `include("test_matlab.jl")` is called.
current_output =
GuessworkQuantumSideInfo.guesswork_MATLAB(p, ρBs; dual = dual, remove_repetition = remove_repetition, kwargs...)
push!(optvals, current_output.optval)
push!(
test_data,
(
opval = current_output.optval,
solver = :MATLAB,
params = (dual = dual, remove_repetition = remove_repetition, kwargs...),
),
)
end
end
end
if test_MISDP && TEST_MISDP
dB = size(ρBs[1], 1)
num_outcomes = min(factorial(length(p)), dB^2 + 1)
if :c in keys(kwargs)
current_output = guesswork_MISDP(
p,
ρBs,
num_outcomes;
                c = kwargs[:c],
solver = misdp_solver(),
verbose = false,
)
else
current_output = guesswork_MISDP(
p,
ρBs,
num_outcomes;
solver = misdp_solver(),
verbose = false,
)
end
test_misdp_objective_value(current_output)
# test_primal(current_output)
push!(optvals, current_output.optval)
push!(
test_data,
(
opval = current_output.optval,
solver = :MISDP,
params = (num_outcomes = num_outcomes,),
),
)
end
if true_opt_val === nothing
true_opt_val = mean(optvals)
end
all_close_to_optimal = all(x -> isapprox(x, true_opt_val; rtol = TOL), optvals)
# some helpful information in case of test failure
if !all_close_to_optimal
@error "`test_optimize` failure" true_opt_val
for (i, test) in enumerate(test_data)
@error "Solve $i results:" test
end
end
@test all_close_to_optimal
return current_output
end
| GuessworkQuantumSideInfo | https://github.com/ericphanson/GuessworkQuantumSideInfo.jl.git |
|
[
"MIT"
] | 0.1.2 | 2f8f4dba5a8bf0aff3b7af5f2181d9aac0014529 | code | 1160 | using GuessworkQuantumSideInfo
using SDPAFamily
using Test
using LinearAlgebra, GenericLinearAlgebra
# Fix a compatibility problem
LinearAlgebra.eigmin(A::Hermitian{Complex{BigFloat},Array{Complex{BigFloat},2}}) =
minimum(real.(eigvals(A)))
function testPOVM(Es; tol = 1e-25)
dB = size(first(Es), 1)
@test sum(Es) ≈ complex(1.0) * I(dB) atol = tol
for E in Es
@test E ≈ E' atol = tol
end
@test all(isposdef, Hermitian(E) + tol * I(dB) for E in Es)
end
T = BigFloat
default_sdp_solver(T) = SDPAFamily.Optimizer{T}(presolve = true)
@testset "BB84" begin
ρBs = GuessworkQuantumSideInfo.BB84_states(T)
p = ones(T, 4) / 4
output = guesswork(p, ρBs; solver = default_sdp_solver(T))
testPOVM(output.Es)
relaxed_output =
guesswork_upper_bound(p, ρBs; num_constraints = 4, make_solver = () -> default_sdp_solver(T))
true_val = (big(1) / big(4)) * (10 - sqrt(big(10)))
@test output.optval ≈ true_val rtol = 1e-25
@test output.optval ≈ relaxed_output.optval rtol = 1e-4
lb = guesswork_lower_bound(p, ρBs; solver = default_sdp_solver(T)).optval
@test lb <= output.optval + 1e-4
end
| GuessworkQuantumSideInfo | https://github.com/ericphanson/GuessworkQuantumSideInfo.jl.git |
|
[
"MIT"
] | 0.1.2 | 2f8f4dba5a8bf0aff3b7af5f2181d9aac0014529 | code | 851 | using Test, GuessworkQuantumSideInfo, LinearAlgebra, Statistics, Random, UnPack
using GenericLinearAlgebra
using SCS, StableRNGs
using Pajarito, Cbc
TEST_MATLAB = false # requires MATLAB and MATLAB.jl installed
TEST_MISDP = true # requires Pajarito or another MISDP solver
TEST_BB84_MISDP = false # takes ~100 seconds; requires TEST_MISDP
TEST_MOI = false # incompatible with the current version of Pajarito
default_sdp_solver() = SCSSolver(verbose = 0, eps = 1e-6)
function misdp_solver(; verbose = false)
sdp_solver = default_sdp_solver()
mip_solver = CbcSolver(loglevel = 0)
PajaritoSolver(
cont_solver = sdp_solver,
mip_solver = mip_solver,
mip_solver_drives = false,
use_mip_starts = true,
solve_relax = false,
log_level = verbose ? 3 : 0,
)
end
include("../test_problems.jl")
| GuessworkQuantumSideInfo | https://github.com/ericphanson/GuessworkQuantumSideInfo.jl.git |
|
[
"MIT"
] | 0.1.2 | 2f8f4dba5a8bf0aff3b7af5f2181d9aac0014529 | docs | 2685 | # GuessworkQuantumSideInfo
[](https://github.com/ericphanson/GuessworkQuantumSideInfo.jl/actions)
[](https://codecov.io/gh/ericphanson/GuessworkQuantumSideInfo.jl)
[](https://ericphanson.github.io/GuessworkQuantumSideInfo.jl/stable)
[](https://ericphanson.github.io/GuessworkQuantumSideInfo.jl/dev)
[](https://arxiv.org/abs/2001.03598)
[](https://zenodo.org/badge/latestdoi/236524092)
This is a package accompanying the preprint [*Guesswork with Quantum Side Information*](https://arxiv.org/abs/2001.03598).
## Quick example
Consider one party Alice who draws a random number in the set `[1,2,3,4]`
uniformly at random. If she draws `1` she sends another party, Bob, the quantum
state `|0⟩`; if she draws `2`, she sends `|1⟩`, if she draws `3` she sends
`|-⟩`, and finally if she draws `4`, she sends `|+⟩`. Bob, knowing this general
procedure but not which number Alice drew, aims to guess the value Alice drew by
performing experiments on the quantum state he was given. The average number of
guesses Bob needs in order to get the right answer, minimized over all quantum
strategies, is the so-called *guesswork with quantum side information*. This
package provides a means to compute this.
```julia
julia> using GuessworkQuantumSideInfo, SCS
julia> p = [0.25, 0.25, 0.25, 0.25];
julia> ketzero = ket(1, 2);
julia> ketone = ket(2, 2);
julia> ketminus = (ket(1, 2) - ket(2,2))/sqrt(2);
julia> ketplus = (ket(1, 2) + ket(2,2))/sqrt(2);
julia> ρBs = dm.([ ketzero, ketone, ketminus, ketplus ])
4-element Array{Array{Complex{Float64},2},1}:
[1.0 + 0.0im 0.0 + 0.0im; 0.0 + 0.0im 0.0 + 0.0im]
[0.0 + 0.0im 0.0 + 0.0im; 0.0 + 0.0im 1.0 + 0.0im]
[0.4999999999999999 + 0.0im -0.4999999999999999 - 0.0im; -0.4999999999999999 + 0.0im 0.4999999999999999 + 0.0im]
[0.4999999999999999 + 0.0im 0.4999999999999999 + 0.0im; 0.4999999999999999 + 0.0im 0.4999999999999999 + 0.0im]
julia> output = guesswork(p, ρBs; solver = SCSSolver(verbose=false));
julia> output.optval
1.709431078700102
```
It turns out it takes `(1/4)*(10 - sqrt(10)) ≈ 1.71` guesses on average.
| GuessworkQuantumSideInfo | https://github.com/ericphanson/GuessworkQuantumSideInfo.jl.git |
|
[
"MIT"
] | 0.1.2 | 2f8f4dba5a8bf0aff3b7af5f2181d9aac0014529 | docs | 3957 | # Examples
Let's load the package, an SDP solver, and define a simple plotting routine.
```@example ex
using GuessworkQuantumSideInfo
using SCS, Plots
get_sdp_solver() = SCSSolver(verbose=false)
plot_pmfN(data) = bar(pmfN(data); xlabel="Guess number", ylabel="Probability of guessing correctly", legend = false)
```
Next, we define some basic qubit states.
```@example ex
dB = 2
ketplus = (ket(1, dB) + ket(2,dB))/sqrt(2)
ketminus = (ket(1, dB) - ket(2,dB))/sqrt(2)
ketzero = ket(1, dB)
ketone = ket(2, dB)
```
## Example 1: A warmup with trivial examples
Let's consider the case with $J=2$ and both states are the same. The side
information is therefore completely uninformative, and the guesswork is
```math
G(X|B) = \frac{1}{2}\cdot 1 + \frac{1}{2} \cdot 2 = 1.5.
```
We can check this:
```@repl ex
p = [0.5, 0.5]
ρBs = dm.([ ketzero, ketzero ])
output = guesswork(p, ρBs; solver = get_sdp_solver());
output.optval
```
We see the result agrees with `1.5`, as we expected. Likewise, if we choose the
two states as $|0\rangle, |1\rangle$, we can get it in one guess every time, of
course, since they are orthogonal:
```@repl ex
p = [0.5, 0.5]
ρBs = dm.([ ketzero, ketone ])
output = guesswork(p, ρBs; solver = get_sdp_solver());
output.optval
```
We can inspect the POVMs:
```@repl ex
output.Es
```
As we would expect, we (approximately) obtain the projection onto $|0 \rangle$
and the projection onto $|1 \rangle$.
## Example 2: the BB84 states
Let's consider the four states $|+ \rangle, |-\rangle, |0\rangle, |1\rangle$.
```@repl ex
p = [0.25, 0.25, 0.25, 0.25]
ρBs = dm.([ ketplus, ketminus, ketzero, ketone ])
output = guesswork(p, ρBs; solver = get_sdp_solver());
output.optval
```
```@example ex
plot_pmfN(output)
```
Let's try the same example, but imposing a steep cost for the fourth guess.
```@repl ex
c = [1.0, 2.0, 3.0, 5000.0]
output = guesswork(p, ρBs; c = c, solver = get_sdp_solver());
output.optval
```
We see that the average number of guesses to get a correct answer has gone up.
However, inspecting the probability mass function for the number of guesses
under the optimal strategy
```@example ex
plot_pmfN(output)
```
we see that the probability of guessing correctly on the
fourth guess goes to almost zero.
## Example 3: two copies of the BB84 states
Let us consider two tensor copies of the BB84 states:
```@repl ex
p = ones(16)/16
ρBs = iid_copies(BB84_states(), 2)
```
In this case, there are $16! = 20922789888000$ possible guessing orders, and
hence $16!$ variables in the primal formulation of the SDP, or $16!+1$
constraints in the dual form of the SDP. In either case, we can't even fit them
all into our computer's memory. Instead, we resort to bounds:
```@repl ex
lb_output = guesswork_lower_bound(p, ρBs, solver = get_sdp_solver());
lb_output.optval
ub_output = guesswork_upper_bound(p, ρBs; max_time = 30, make_solver = get_sdp_solver);
ub_output.optval
```
## A closer look at `guesswork_upper_bound`
We can understand [`guesswork_upper_bound`](@ref) better by setting
`verbose=true`. The algorithm computes a sequence of upper bounds by relaxing
the dual problem. First, it removes all constraints, and chooses the dual
variable $Y$ as the identity matrix. Then it uses a simulated annealing
algorithm to heuristically minimize $\lambda_\text{min}(R_{\vec g} - Y)$ over
the possible guessing orders $\vec g$ (which are permutations), to find a
constraint that is "maximally" violated by this choice of $Y$. Then we add the
corresponding constraint $Y \leq R_{\vec g}$ to the dual problem and solve it
again. This is repeated until either a fixed number of constraints is added,
some number of simulated annealing runs fails to find another violated
constraint, or a time limit is reached. In the following, we set a time limit of
30 seconds.
```@repl ex
ub_output = guesswork_upper_bound(p, ρBs; verbose=true, max_time = 30, make_solver = get_sdp_solver);
```
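Each of these termination criteria corresponds to a keyword argument of
[`guesswork_upper_bound`](@ref). For example, the following call (a usage sketch reusing
`p`, `ρBs`, and `get_sdp_solver` from above) stops as soon as ten constraints have been
added, fifty simulated annealing runs in a row fail to find a violated constraint, or
the projected running time exceeds 30 seconds, whichever happens first:
```julia
ub_output = guesswork_upper_bound(p, ρBs;
    num_constraints = 10, # stop once this many constraints have been added
    max_retries = 50,     # give up after this many annealing runs without a violated constraint
    max_time = 30,        # stop if the projected running time (in seconds) exceeds this
    make_solver = get_sdp_solver)
```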
| GuessworkQuantumSideInfo | https://github.com/ericphanson/GuessworkQuantumSideInfo.jl.git |
|
[
"MIT"
] | 0.1.2 | 2f8f4dba5a8bf0aff3b7af5f2181d9aac0014529 | docs | 3020 | # Computing the guesswork to high precision
In order to solve the SDP describing the guesswork in high-precision, we need to
start a new Julia session. That's because Pajarito, the library used for
mixed-integer SDPs, and SDPAFamily, the one for high-precision SDPs, each
require different versions of Convex.jl, the optimization problem modeling
library. The following example was run locally and the output copied to the
documentation. First, add the packages `Convex#master` (or just `Convex` once
version 0.13 of that package is released) and `SDPAFamily`, and then the
following code.
```julia
julia> setprecision(2000) # set Julia's global BigFloat precision to 2000
2000
julia> using SDPAFamily
julia> opt = () -> SDPAFamily.Optimizer{BigFloat}(
presolve = true,
params = ( epsilonStar = 1e-200, # constraint tolerance
epsilonDash = 1e-200, # normalized duality gap tolerance
precision = 2000 # arithmetric precision used in sdpa_gmp
))
#186 (generic function with 1 method)
julia> using GuessworkQuantumSideInfo
julia> T = BigFloat
BigFloat
julia> ρBs = BB84_states(T);
julia> p = ones(T, 4) / 4;
julia> @time output = guesswork(p, ρBs; solver = opt());
54.613852 seconds (274.09 M allocations: 45.610 GiB, 11.66% gc time)
julia> output.optval
1.709430584957905167000276613891820366570111215168695793285623786801851390340190444663937972905174926203163178961789986212862785992386529996327370100824961524163769585705185014835212461631471665128968986016501876699676856588609960582022565322653047114497843315997252226645378373262132182609166891615169945992530274280324399117232937277795982220635506452810752194823768763057910726875757516626180726385923719763995534231714003266054518160879579903803264241437877679215965923661443029759736849138449576021864074135403089512757915961340265964663906514782565168514016103734338487088415453174248635495108648696
julia> true_val = (big(1) / big(4)) * (10 - sqrt(big(10)))
1.709430584957905167000276613891820366570111215168695793285623786801851390340190444663937972905174926203163178961789986212862785992386529996327370100824961524163769585705185014835212461631471665128968978670767605727382105884738907685530234490547847514542907072112898417861750220350793858949168259598827171415967762552027433367614096830955304662484652953430354884079216261880492214057666332006545121703580399192189551149445422988531788846349936605605074292838992743898005525991364002709162694336846983051837080992237890825551560768461069874907526272632644537498160412788839029317222677722739412986441086645
julia> abs(output.optval - true_val) <= 10.0^(-200)
true
```
Note that the output of the optimization solver matches `true_val` up to an
error of at most $10^{-200}$.
See also
<https://github.com/ericphanson/GuessworkQuantumSideInfo.jl/tree/master/test/high_precision_tests>
for a folder with a reproducible environment for running this kind of
high-precision code.
| GuessworkQuantumSideInfo | https://github.com/ericphanson/GuessworkQuantumSideInfo.jl.git |
|
[
"MIT"
] | 0.1.2 | 2f8f4dba5a8bf0aff3b7af5f2181d9aac0014529 | docs | 2060 | ```@meta
CurrentModule = GuessworkQuantumSideInfo
```
# GuessworkQuantumSideInfo
This is a package accompanying the preprint [*Guesswork with Quantum Side Information*](http://arxiv.org/abs/2001.03598).
See the [Examples](@ref) for some examples (or just below for a quick example),
[Computing the guesswork to high precision](@ref) for an example solving a
problem with a high-precision SDP solver, [Using a mixed-integer SDP to find
extremal strategies](@ref) for an example using a mixed-integer SDP, or below
for the documentation of the functions provided by this package.
## Quick example
Consider one party Alice who draws a random number in the set `[1,2,3,4]`
uniformly at random. If she draws `1` she sends another party, Bob, the quantum
state `|0⟩`; if she draws `2`, she sends `|1⟩`, if she draws `3` she sends
`|-⟩`, and finally if she draws `4`, she sends `|+⟩`. Bob, knowing this general
procedure but not which number Alice drew, aims to guess the value Alice drew by
performing experiments on the quantum state he was given. The average number of
guesses Bob needs in order to get the right answer, minimized over all quantum
strategies, is the so-called *guesswork with quantum side information*. This
package provides a means to compute this.
```@repl
using GuessworkQuantumSideInfo, SCS
p = [0.25, 0.25, 0.25, 0.25];
ketzero = ket(1, 2);
ketone = ket(2, 2);
ketminus = (ket(1, 2) - ket(2,2))/sqrt(2);
ketplus = (ket(1, 2) + ket(2,2))/sqrt(2);
ρBs = dm.([ ketzero, ketone, ketminus, ketplus ])
output = guesswork(p, ρBs; solver = SCSSolver(verbose=false));
output.optval
```
## Guesswork functions
```@docs
GuessworkQuantumSideInfo.guesswork
GuessworkQuantumSideInfo.guesswork_lower_bound
GuessworkQuantumSideInfo.guesswork_upper_bound
```
## Quantum states
```@docs
GuessworkQuantumSideInfo.ket
GuessworkQuantumSideInfo.bra
GuessworkQuantumSideInfo.BB84_states
GuessworkQuantumSideInfo.iid_copies
GuessworkQuantumSideInfo.randdm
GuessworkQuantumSideInfo.randprobvec
```
## Utilities
```@docs
GuessworkQuantumSideInfo.pmfN
```
| GuessworkQuantumSideInfo | https://github.com/ericphanson/GuessworkQuantumSideInfo.jl.git |
|
[
"MIT"
] | 0.1.2 | 2f8f4dba5a8bf0aff3b7af5f2181d9aac0014529 | docs | 2203 | # Using a mixed-integer SDP to find extremal strategies
Let us revisit [Example 2: the BB84 states](@ref) using a mixed-integer SDP.
This allows us to specify the number of non-zero POVM elements in the
measurement-- or, equivalently, the number of possible guessing orders the final
strategy chooses among.
We will use the [Pajarito.jl](https://github.com/JuliaOpt/Pajarito.jl) solver,
which solves mixed-integer SDPs by solving an alternating sequence of
mixed-integer linear programs and SDPs; see the [Pajarito
paper](https://arxiv.org/abs/1808.05290) for information on how Pajarito works.
Pajarito requires both an SDP solver and a mixed-integer linear solver; we will
use the open source solvers SCS and Cbc, respectively.
```@repl misdp
using GuessworkQuantumSideInfo
using Pajarito, Cbc, SCS # solvers
function misdp_solver(; verbose = false)
sdp_solver = SCSSolver(verbose=0)
mip_solver = CbcSolver(loglevel = 0)
PajaritoSolver(
cont_solver = sdp_solver,
mip_solver = mip_solver,
mip_solver_drives = false,
use_mip_starts = true,
solve_relax = false,
log_level = verbose ? 3 : 0,
)
end
p = ones(4)/4
ρBs = BB84_states()
output = guesswork_MISDP(p, ρBs, 2; verbose=false, solver = misdp_solver());
output.optval
output.Es
```
We see that with only two measurement outcomes we recover the same optimal value
as the case without a constraint on the number of measurement outcomes (in
[Example 2: the BB84 states](@ref)). We've thus found an extremal strategy (in
that the POVM associated to this strategy cannot be written as the convex
combination of two other POVMs). Often, the solutions returned by
[`guesswork`](@ref) do not return extremal POVMs, although the details depend on
the SDP solver used.
We can see what the associated guessing orders are:
```@repl misdp
output.povm_outcomes
```
To summarize, one optimal strategy for the case of `p` uniform, and `ρBs` given
by the four BB84 states, is to perform a projective measurement whose operators
are given by `output.Es` above, and then make guesses in one of the two orders
given by `output.povm_outcomes` (depending on which measurement outcome was
obtained).
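As a quick sanity check, one can recompute the optimal value directly from the returned
measurement operators and guessing orders. The following sketch mirrors the consistency
check used in the package's test suite, reusing `p`, `ρBs`, and `output` from the session
above, and assumes the cost vector whose relevant entries are `c = 1:4` (the `n`-th
correct guess costs `n`):
```julia
using LinearAlgebra

c = collect(1:4)
value = sum(
    dot(c[invperm(collect(g))], [tr(p[x] * ρBs[x] * E) for x in eachindex(p)])
    for (E, g) in zip(output.Es, output.povm_outcomes)
)
value ≈ output.optval # expected to hold up to solver tolerance
```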
| GuessworkQuantumSideInfo | https://github.com/ericphanson/GuessworkQuantumSideInfo.jl.git |
|
[
"MIT"
] | 0.1.2 | 2f8f4dba5a8bf0aff3b7af5f2181d9aac0014529 | docs | 18162 | *The following is taken from my PhD thesis (translated from LaTeX to Markdown mostly by Pandoc 2.10).*
- Eric Hanson
# Numeric experiments
Some of the algorithms provided by GuessworkQuantumSideInfo.jl were compared on
a set of 12 test problems. Each problem has *p* ≡ *u*, the uniform
distribution,
for simplicity. The states are chosen as
1. Two random qubit density matrices
2. Two random qutrit density matrices
3. Three pure qubits chosen equidistant within one plane of the Bloch
sphere (the “Y-states”), i.e.
cos(2πj/3)|0> + sin(2πj/3)|1>, for j = 1, 2, 3
4. Three random qubit density matrices
5. Three random qutrit density matrices
6. The four BB84 states, |0>, |1>, and
|±> = (|0> ± |1>)/√2,
as well as the “tensor-2” case of
{*ρ* ⊗ *σ* : *ρ*, *σ* ∈ *S*}
for each of the six sets *S* listed above, corresponding to the
guesswork problem with quantum side information associated to
*ρ*<sub>*XB*</sub><sup> ⊗ 2</sup>, where *ρ*<sub>*XB*</sub> is the
state associated to the original guesswork problem with quantum side
information. The random states were chosen uniformly at random
(i.e. according to the Haar measure).
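For concreteness, the following sketch shows how instances of this form can be generated
with the helper functions exported by the package (the particular random draws used in
the experiments are, of course, not reproducible from this description):
```julia
using GuessworkQuantumSideInfo # provides ket, dm, randdm, BB84_states, iid_copies

# (1), (2), (4), (5): Haar-random density matrices
two_random_qubits    = [randdm(2) for _ = 1:2]
two_random_qutrits   = [randdm(3) for _ = 1:2]
three_random_qubits  = [randdm(2) for _ = 1:3]
three_random_qutrits = [randdm(3) for _ = 1:3]

# (3): the "Y-states", three pure qubits equidistant within one plane of the Bloch sphere
Y_states = [dm(cos(2π * j / 3) * ket(1, 2) + sin(2π * j / 3) * ket(2, 2)) for j = 1:3]

# (6): the four BB84 states
bb84 = BB84_states()

# the corresponding "tensor-2" instance {ρ ⊗ σ : ρ, σ ∈ S}, e.g. for the BB84 set:
bb84_tensor2 = iid_copies(bb84, 2)

# every instance uses the uniform prior
uniform_prior(S) = ones(length(S)) / length(S)
```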
The exponentially-large SDP formulation (and its dual), the
mixed-integer SDP algorithm, and the active set method were compared,
with several choices of parameters and underlying solvers. The
mixed-integer SDP formulation was evaluated both with *M* = *d*<sub>*B*</sub>
(yielding an upper bound) and with *M* = *d*<sub>*B*</sub><sup>2</sup>
(yielding the optimal value), solved with the Pajarito mixed-integer SDP
solver [2], using Convex.jl (version 0.12.7) [6] to formulate the problem.
Pajarito proceeds by solving mixed-integer linear problems (MILP) and
SDPs as subproblems, and thus uses both a MILP solver and an SDP solver
as subcomponents. Pajarito provides two algorithms: an iterative
algorithm, which alternates between solving MILP and SDP subproblems,
and solving a single branch-and-cut problem in which SDP subproblems are
solved via so-called lazy callbacks to add cuts to the mixed-integer
problem. The latter is called “mixed-solver drives” (MSD) in the
Pajarito documentation. We tested three configurations of Pajarito
(version 0.7.0):
* (c1)
Gurobi (version 9.0.3) as the MILP solver and MOSEK (version 8.1.0.82)
as the SDP solver, with Pajarito’s MSD algorithm
* (c2)
Gurobi as the MILP solver and MOSEK as the SDP solver, with Pajarito’s
iterative algorithm, with a relative optimality gap tolerance of 0,
* (o)
Cbc (version 2.10.3) [4] as the MILP solver, and SCS (version 2.1.1) [5] as the
SDP solver, with Pajarito’s iterative algorithm
Here, ‘c’ stands for commercial, and ‘o’ for open-source. In the
configuration (c1), Gurobi was given a relative optimality
gap tolerance of 10<sup> − 5</sup>, and in (c2) a relative optimality
gap tolerance of 0. In both configurations, Gurobi was given an absolute
linear-constraint-wise feasibility tolerance of 10<sup> − 8</sup>, and
an integrality tolerance of 10<sup> − 9</sup>. These choices of
parameters match those made in [2]. Cbc was given an integrality tolerance
of 10<sup> − 8</sup>, and SCS’s (normalized) primal, dual residual and
relative gap were set to 10<sup> − 6</sup> for each problem. The default
parameters were used otherwise. Note the MSD option was not used with
Cbc, since the solver does not support lazy callbacks.
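As an illustration, configuration (o) corresponds to a Pajarito setup along the lines of
the one used in the package's test suite, sketched below; the additional Cbc integrality
tolerance mentioned above would be passed as a further solver option, which is omitted
here:
```julia
using Pajarito, Cbc, SCS

# Open-source configuration (o): iterative algorithm, Cbc for the MILP subproblems,
# SCS for the SDP subproblems.
function misdp_solver_o(; verbose = false)
    PajaritoSolver(
        cont_solver = SCSSolver(verbose = 0, eps = 1e-6),
        mip_solver = CbcSolver(loglevel = 0),
        mip_solver_drives = false, # Cbc does not support the lazy callbacks needed for MSD
        use_mip_starts = true,
        solve_relax = false,
        log_level = verbose ? 3 : 0,
    )
end
```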
For the (exponentially large) SDP primal and dual formulations, the
problems were solved with both MOSEK and SCS, and likewise with the
active-set upper bound.
The active set method uses simulated annealing to iteratively add
violated constraints to the problem to find an upper bound, and uses a
maximum-time parameter *t*<sub>max</sub> to stop iterating when the
estimated time of finding another constraint to add would cause the
total running time to exceed the maximum time[1]. This
provides a way to compare the improvement (or lack thereof) of running
the algorithm for more iterations. The algorithm also terminates when a
violated constraint cannot be found after 50 runs of simulated annealing
(started each time with different random initial conditions). Here, the
problems were solved with three choices of *t*<sub>max</sub>, 20 s,
60 s, and 240 s.
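In terms of the package's API, each such run corresponds to a call of the following form
(a usage sketch, shown with the open-source SDP solver; *t*<sub>max</sub> corresponds to
the `max_time` keyword argument):
```julia
using GuessworkQuantumSideInfo, SCS

p, ρBs = ones(4) / 4, BB84_states() # one of the problem instances above

ub = guesswork_upper_bound(p, ρBs;
    max_time = 20,    # t_max in seconds
    max_retries = 50, # stop if 50 annealing runs in a row find no violated constraint
    make_solver = () -> SCSSolver(verbose = 0, eps = 1e-6))
ub.optval
```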
The exact answer was not known analytically for most of these problems,
so the average relative error was calculated by comparing to the mean of
the solutions (excluding the active-set method and the MISDP with
*M* = *d*<sub>*B*</sub>, which only give an upper bound in general). In
the BB84 case, in which the solution is known exactly (see [3]), the
solutions obtained here match the analytic value to a relative tolerance
of at least 10<sup> − 7</sup>.
The problems were run sequentially on a 4-core desktop computer (Intel
i7-6700K 4.00GHz CPU, with 16 GB of RAM, on Ubuntu-20.04 via Windows
Subsystem for Linux, version 2), via the programming language Julia
(version 1.5.1), with a 5 minute time limit.
One can see that the MISDP problems were harder to solve than the
corresponding SDPs for these relatively small problem instances. The
MISDPs have the advantage of finding extremal solutions, however, in the
case *M* = *d*<sub>*B*</sub>, and may scale better to large instances.
Additionally, the active-set upper bound performed fairly well, finding
feasible points within 20 % of the optimum in all cases, with only
*t*<sub>max</sub> = 20 s, and often finding near-optimal solutions. It
was also the only method able to scale to the largest instances tested,
such as two copies of the BB84 states (which involves 16 quantum states
in dimension 4, and for which the SDP formulation has 16! variables). In
general, the commercial solvers performed better than the open source
solvers, with the notable exception of the active-set upper bound with
MOSEK, in which 2 more problems timed out than with SCS. This could be
due to SCS being a first-order solver which can therefore possibly scale
to larger problem instances than MOSEK, which is a second-order solver.
| Algorithm | Parameters | average rel. error | average time | number solved | number timed out | number errored out |
|:--------------------------------------|:--------------------------|:-------------------|:-------------|:--------------|:-----------------|:-------------------|
| MISDP (*d*<sub>*B*</sub>) | Pajarito (c1) | 0 % | 23.74 s | 6 | 6 | 0 |
| | Pajarito (c2) | 0 % | 24.03 s | 6 | 6 | 0 |
| | Pajarito (o) | 0 % | 45.05 s | 6 | 6 | 0 |
| MISDP (*d*<sub>*B*</sub><sup>2</sup>) | Pajarito (c1) | 0 % | 35.27 s | 5 | 7 | 0 |
| | Pajarito (c2) | 0 % | 27.97 s | 4 | 8 | 0 |
| | Pajarito (o) | 0 % | 131.35 s | 4 | 8 | 0 |
| SDP | MOSEK | 0 % | 8.99 s | 8 | 3 | 1 |
| | SCS | 0 % | 9.08 s | 8 | 3 | 1 |
| SDP (dual) | MOSEK | 0 % | 8.74 s | 8 | 3 | 1 |
| | SCS | 0 % | 8.59 s | 8 | 3 | 1 |
| Active set upper bound (MOSEK) | *t*<sub>max</sub> = 20 s | 6.80 % | 16.08 s | 10 | 2 | 0 |
| | *t*<sub>max</sub> = 60 s | 6.79 % | 19.03 s | 10 | 2 | 0 |
| | *t*<sub>max</sub> = 240 s | 6.80 % | 26.17 s | 10 | 2 | 0 |
| Active set upper bound (SCS) | *t*<sub>max</sub> = 20 s | 6.09 % | 33.24 s | 12 | 0 | 0 |
| | *t*<sub>max</sub> = 60 s | 6.09 % | 35.78 s | 12 | 0 | 0 |
| | *t*<sub>max</sub> = 240 s | 6.09 % | 34.30 s | 11 | 1 | 0 |
Caption: Comparison of average relative error and average solve time for the 12
problems discussed above. A problem is considered “timed out” if an
answer is not obtained in 5 minutes, and “errored out” if the solution
was not obtained due to errors (such as running out of RAM). The average
relative error, which was rounded to two decimal digits, and the time
taken are calculated only over the problems which were solved by the
given algorithm and choice of parameters. “MISDP (*d*<sub>*B*</sub>)”
refers to the choice *M* = *d*<sub>*B*</sub>, and likewise “MISDP
(*d*<sub>*B*</sub><sup>2</sup>)” refers to the choice
*M* = *d*<sub>*B*</sub><sup>2</sup>.
| Algorithm | Parameters | Two random qubits | Two random qutrits | Y-states |
|:--------------------------------------|:--------------------------|:----------------------------------|:----------------------------------|:----------------------------|
| MISDP (*d*<sub>*B*</sub>) | Pajarito (c1) | 23.63 s, timeout | 23.60 s, timeout | 23.56 s, timeout |
| | Pajarito (c2) | 22.99 s, timeout | 23.31 s, timeout | 23.21 s, timeout |
| | Pajarito (o) | 23.47 s, timeout | 24.77 s, timeout | 26.15 s, timeout |
| MISDP (*d*<sub>*B*</sub><sup>2</sup>) | Pajarito (c1) | 24.49 s (0.00 %), timeout | 31.40 s (0.00 %), timeout | 26.27 s (0.00 %), timeout |
| | Pajarito (c2) | 25.02 s (0.00 %), timeout | 31.39 s (0.00 %), timeout | 29.08 s (0.00 %), timeout |
| | Pajarito (o) | 26.54 s (0.00 %), timeout | 212.79 s (0.00 %), timeout | 141.14 s (0.00 %), timeout |
| SDP | MOSEK | 8.69 s, 8.84 s | 8.78 s, 9.23 s | 9.33 s, timeout |
| | SCS | 9.00 s, 8.90 s | 8.44 s, 11.22 s | 8.98 s, timeout |
| SDP (dual) | MOSEK | 8.46 s, 8.63 s | 8.54 s, 8.83 s | 9.11 s, timeout |
| | SCS | 8.76 s, 8.32 s | 8.33 s, 9.20 s | 8.74 s, timeout |
| Active set upper bound (MOSEK) | *t*<sub>max</sub> = 20 s | 8.76 s (19.5 %), 10.41 s (1.5 %) | 8.89 s (19.5 %), timeout | 9.72 s (0 %), 34.25 s (? %) |
| | *t*<sub>max</sub> = 60 s | 10.91 s (19.5 %), 10.41 s (1.9 %) | 8.87 s (19.5 %), timeout | 9.66 s (0 %), 31.00 s (? %) |
| | *t*<sub>max</sub> = 240 s | 9.47 s (19.5 %), 10.40 s (1.5 %) | 8.90 s (19.5 %), timeout | 9.81 s (0 %), 30.26 s (? %) |
| Active set upper bound (SCS) | *t*<sub>max</sub> = 20 s | 9.04 s (19.5 %), 10.92 s (1.5 %) | 8.70 s (19.5 %), 101.06 s (1.1 %) | 9.23 s (0 %), 82.84 s (? %) |
| | *t*<sub>max</sub> = 60 s | 9.07 s (19.5 %), 10.22 s (1.9 %) | 8.66 s (19.5 %), 32.94 s (1.2 %) | 9.18 s (0 %), 50.79 s (? %) |
| | *t*<sub>max</sub> = 240 s | 9.04 s (19.5 %), 10.02 s (1.9 %) | 8.79 s (19.5 %), 22.69 s (1.2 %) | 9.31 s (0 %), 37.36 s (? %) |
Caption: The individual
timings for each algorithm and choice of settings on problems (1)–(3),
and the corresponding “tensor-2” problems discussed at
the beginning of this section.
For each algorithm, the running time of the original problem is given
followed by the running time on the “tensor-2” problem, e.g. the SDP
formulation with MOSEK on the two random qubits problem was solved in
8.69 seconds, and in 8.84 seconds for the corresponding tensor-2
problem. “timeout” is written whenever the problem was not solved within
5 minutes. For the active set algorithms, the relative error is also
given for each problem in parenthesis. Note that the MISDP formulation
with *M* = *d*<sub>*B*</sub> is also only known to be an upper bound,
but it achieved a relative error of less than 10<sup> − 5</sup> in each instance, so
the relative errors are omitted. Lastly, the relative error is written
as ? % in the case that only an upper bound was obtained.
| Algorithm | Parameters | Three random qubits | Three random qutrits | BB84 states |
|:--------------------------------------|:--------------------------|:------------------------------|:-------------------------------|:-----------------------------|
| MISDP (*d*<sub>*B*</sub>) | Pajarito (c1) | 23.63 s, timeout | 24.49 s, timeout | 23.51 s, timeout |
| | Pajarito (c2) | 23.17 s, timeout | 26.50 s, timeout | 25.01 s, timeout |
| | Pajarito (o) | 27.11 s, timeout | 95.01 s, timeout | 73.80 s, timeout |
| MISDP (*d*<sub>*B*</sub><sup>2</sup>) | Pajarito (c1) | 25.82 s (0.00 %), timeout | timeout, timeout | 68.35 s (0.00 %), timeout |
| | Pajarito (c2) | 26.38 s (0.00 %), timeout | timeout, timeout | timeout, timeout |
| | Pajarito (o) | 144.93 s (0.00 %), timeout | timeout, timeout | timeout, timeout |
| SDP | MOSEK | 9.35 s, timeout | 8.82 s, timeout | 8.87 s, error |
| | SCS | 9.09 s, timeout | 8.46 s, timeout | 8.54 s, error |
| SDP (dual) | MOSEK | 9.14 s, timeout | 8.55 s, timeout | 8.62 s, error |
| | SCS | 8.86 s, timeout | 8.25 s, timeout | 8.23 s, error |
| Active set upper bound (MOSEK) | *t*<sub>max</sub> = 20 s | 9.73 s (1.3 %), 32.29 s (? %) | 9.50 s (5.8 %), timeout | 9.51 s (0 %), 27.72 s (? %) |
| | *t*<sub>max</sub> = 60 s | 9.69 s (1.3 %), 24.12 s (? %) | 9.45 s (5.8 %), timeout | 9.77 s (0 %), 66.47 s (? %) |
| | *t*<sub>max</sub> = 240 s | 9.69 s (1.3 %), 30.42 s (? %) | 9.45 s (5.8 %), timeout | 9.49 s (0 %), 133.81 s (? %) |
| Active set upper bound (SCS) | *t*<sub>max</sub> = 20 s | 9.44 s (1.3 %), 35.66 s (? %) | 9.67 s (5.8 %), 84.43 s (? %) | 9.09 s (0 %), 28.76 s (? %) |
| | *t*<sub>max</sub> = 60 s | 9.44 s (1.3 %), 54.60 s (? %) | 9.11 s (5.8 %), 155.51 s (? %) | 9.29 s (0 %), 70.57 s (? %) |
| | *t*<sub>max</sub> = 240 s | 8.89 s (1.3 %), 75.43 s (? %) | 9.01 s (5.8 %), timeout | 9.15 s (0 %), 177.64 s (? %) |
Caption: The
individual timings for each algorithm and choice of settings on problems
(4)–(6). See the previous caption for a description of the quantities shown. Here, “error”
means the solution was not obtained due to an error (such as running out
of memory).
[1] The maximum time can still be exceeded, since at least one iteration
must be performed and the estimate can be wrong.
# References
* [2] Coey, Chris, Miles Lubin, and Juan Pablo Vielma. 2018. “Outer Approximation With Conic Certificates For Mixed-Integer Convex Problems.” ArXiv:1808.05290 [Math], August. http://arxiv.org/abs/1808.05290.
* [3] Hanson, Eric P., Vishal Katariya, Nilanjana Datta, and Mark M. Wilde. 2020. “Guesswork with Quantum Side Information.” ArXiv:2001.03598 [Quant-Ph], February. http://arxiv.org/abs/2001.03598.
* [4] johnjforrest, Stefan Vigerske, Ted Ralphs, Haroldo Gambini Santos, Lou Hafer, Bjarni Kristjansson, jpfasano, et al. 2019. Coin-or/Cbc: Version 2.10.3. Zenodo. https://doi.org/10.5281/zenodo.3246628.
* [5] O’Donoghue, B., E. Chu, N. Parikh, and S. Boyd. 2016. “Conic Optimization via Operator Splitting and Homogeneous Self-Dual Embedding.” Journal of Optimization Theory and Applications 169 (3): 1042–68.
* [6] Udell, Madeleine, Karanveer Mohan, David Zeng, Jenny Hong, Steven Diamond, and Stephen Boyd. 2014. “Convex Optimization in Julia.” In Proceedings of the 1st First Workshop for High Performance Technical Computing in Dynamic Languages, 18–28.
| GuessworkQuantumSideInfo | https://github.com/ericphanson/GuessworkQuantumSideInfo.jl.git |
|
[
"MIT"
] | 0.1.2 | 2f8f4dba5a8bf0aff3b7af5f2181d9aac0014529 | docs | 77886 | # A log of the output from the numeric experiments
```julia
GuessworkQuantumSideInfo.jl/numeric_experiments on numeric_experiments [$?]
15:47:39 ❯ julia --project=.
_
_ _ _(_)_ | Documentation: https://docs.julialang.org
(_) | (_) (_) |
_ _ _| |_ __ _ | Type "?" for help, "]?" for Pkg help.
| | | | | | |/ _` | |
| | |_| | | | (_| | | Version 1.5.1 (2020-08-25)
_/ |\__'_|_|_|\__'_| | Official https://julialang.org/ release
|__/ |
julia> versioninfo()
Julia Version 1.5.1
Commit 697e782ab8 (2020-08-25 20:08 UTC)
Platform Info:
OS: Linux (x86_64-pc-linux-gnu)
CPU: Intel(R) Core(TM) i7-6700K CPU @ 4.00GHz
WORD_SIZE: 64
LIBM: libopenlibm
LLVM: libLLVM-9.0.1 (ORCJIT, skylake)
Environment:
JULIA_REVISE_POLL = 1
julia>
GuessworkQuantumSideInfo.jl/numeric_experiments on numeric_experiments [$?] took 1m44s
15:49:29 ❯ julia --project=. --startup-file=no
_
_ _ _(_)_ | Documentation: https://docs.julialang.org
(_) | (_) (_) |
_ _ _| |_ __ _ | Type "?" for help, "]?" for Pkg help.
| | | | | | |/ _` | |
| | |_| | | | (_| | | Version 1.5.1 (2020-08-25)
_/ |\__'_|_|_|\__'_| | Official https://julialang.org/ release
|__/ |
julia> include("run_problems.jl")
Academic license - for non-commercial use only
Problem 1/12, algorithm 1/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:45
Finished!
[ Info: SubString{String}["1.254935008191061", "22.8479812"]
Problem 1/12, algorithm 2/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:45
Finished!
[ Info: SubString{String}["1.254934996180558", "23.3493885"]
Problem 1/12, algorithm 3/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:47
Finished!
[ Info: SubString{String}["1.2549349930201852", "24.7053996"]
Problem 1/12, algorithm 4/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:29
Finished!
[ Info: SubString{String}["1.254935008096305", "8.6902007"]
Problem 1/12, algorithm 5/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:29
Finished!
[ Info: SubString{String}["1.254934989188837", "8.458288"]
Problem 1/12, algorithm 6/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.4999999999980487", "8.7578906"]
Problem 1/12, algorithm 7/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:32
Finished!
[ Info: SubString{String}["1.4999999999980487", "10.9102688"]
Problem 1/12, algorithm 8/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.4999999999980487", "9.4719107"]
Problem 1/12, algorithm 9/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.254934948577658", "9.0020535"]
Problem 1/12, algorithm 10/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:29
Finished!
[ Info: SubString{String}["1.2549349952019324", "8.7551425"]
Problem 1/12, algorithm 11/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:29
Finished!
[ Info: SubString{String}["1.499999999879668", "9.0447568"]
Problem 1/12, algorithm 12/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:29
Finished!
[ Info: SubString{String}["1.499999999879668", "9.0737937"]
Problem 1/12, algorithm 13/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:29
Finished!
[ Info: SubString{String}["1.499999999879668", "9.0386222"]
Problem 2/12, algorithm 1/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:56
Finished!
[ Info: SubString{String}["1.5315835908607798", "33.7600876"]
Problem 2/12, algorithm 2/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:50
Finished!
[ Info: SubString{String}["1.5315835696345121", "27.0816645"]
Problem 2/12, algorithm 3/13 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 2/12, algorithm 4/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.5315835982285744", "9.3482174"]
Problem 2/12, algorithm 5/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.5315835639649773", "9.1440335"]
Problem 2/12, algorithm 6/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.55143345179782", "9.7268027"]
Problem 2/12, algorithm 7/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.55143345179782", "9.6872898"]
Problem 2/12, algorithm 8/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.55143345179782", "9.6855354"]
Problem 2/12, algorithm 9/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.5315836482762242", "9.0878709"]
Problem 2/12, algorithm 10/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:29
Finished!
[ Info: SubString{String}["1.5315835761159902", "8.8595117"]
Problem 2/12, algorithm 11/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.5514335194601956", "9.4359064"]
Problem 2/12, algorithm 12/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.5514335194601956", "9.4387474"]
Problem 2/12, algorithm 13/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.5514335194601956", "8.8855632"]
Problem 3/12, algorithm 1/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:57
Finished!
┌ Warning: MOSEK warning 705: #45 (nearly) zero elements are specified in sparse row ''(0) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1529) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1530) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1531) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1532) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1533) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1534) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1535) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1536) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1537) of matrix 'A'.
│ MOSEK warning 705: #45 (nearly) zero elements are specified in sparse row ''(0) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1559) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1560) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1561) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1562) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1563) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1564) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1565) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1566) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1567) of matrix 'A'.
└ @ Main /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/run_problems.jl:47
[ Info: SubString{String}["MOSEK warning 705: #45 (nearly) zero elements are specified in sparse row ''(0) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1529) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1530) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1531) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1532) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1533) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1534) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1535) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1536) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1537) of matrix 'A'.", "MOSEK warning 705: #45 (nearly) zero elements are specified in sparse row ''(0) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1559) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1560) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1561) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1562) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1563) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1564) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1565) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1566) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1567) of matrix 'A'.", "1.422649790197531", "32.9258842"]
Problem 3/12, algorithm 2/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:50
Finished!
┌ Warning: MOSEK warning 705: #45 (nearly) zero elements are specified in sparse row ''(0) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1529) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1530) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1531) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1532) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1533) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1534) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1535) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1536) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1537) of matrix 'A'.
│ MOSEK warning 705: #45 (nearly) zero elements are specified in sparse row ''(0) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1559) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1560) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1561) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1562) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1563) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1564) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1565) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1566) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1567) of matrix 'A'.
└ @ Main /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/run_problems.jl:47
[ Info: SubString{String}["MOSEK warning 705: #45 (nearly) zero elements are specified in sparse row ''(0) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1529) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1530) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1531) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1532) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1533) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1534) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1535) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1536) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1537) of matrix 'A'.", "MOSEK warning 705: #45 (nearly) zero elements are specified in sparse row ''(0) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1559) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1560) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1561) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1562) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1563) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1564) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1565) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1566) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1567) of matrix 'A'.", "1.4226497382896444", "27.6751057"]
Problem 3/12, algorithm 3/13 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 3/12, algorithm 4/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.422649741094202", "9.326394"]
Problem 3/12, algorithm 5/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:29
Finished!
[ Info: SubString{String}["1.4226497338611328", "9.1115323"]
Problem 3/12, algorithm 6/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.422649730078765", "9.7188591"]
Problem 3/12, algorithm 7/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.4226497077704807", "9.6625051"]
Problem 3/12, algorithm 8/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.4226497301069139", "9.8102977"]
Problem 3/12, algorithm 9/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:29
Finished!
[ Info: SubString{String}["1.4226494912208716", "8.976787"]
Problem 3/12, algorithm 10/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:29
Finished!
[ Info: SubString{String}["1.422649730963161", "8.741583"]
Problem 3/12, algorithm 11/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.4226497307011863", "9.2282523"]
Problem 3/12, algorithm 12/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.4226497307011854", "9.1781923"]
Problem 3/12, algorithm 13/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.4226497307011863", "9.3057953"]
Problem 4/12, algorithm 1/13 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 4/12, algorithm 2/13 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 4/12, algorithm 3/13 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 4/12, algorithm 4/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:31
Finished!
[ Info: SubString{String}["1.709430585203046", "8.8681798"]
Problem 4/12, algorithm 5/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:31
Finished!
[ Info: SubString{String}["1.70943058402584", "8.6237742"]
Problem 4/12, algorithm 6/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.7094305753904342", "9.5134354"]
Problem 4/12, algorithm 7/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.7094305810180206", "9.7677162"]
Problem 4/12, algorithm 8/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:32
Finished!
[ Info: SubString{String}["1.7094305248798227", "9.4909807"]
Problem 4/12, algorithm 9/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.7094306214229236", "8.5401641"]
Problem 4/12, algorithm 10/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:31
Finished!
[ Info: SubString{String}["1.7094305848837652", "8.2306411"]
Problem 4/12, algorithm 11/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:31
Finished!
[ Info: SubString{String}["1.7094305682408302", "9.0949739"]
Problem 4/12, algorithm 12/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:29
Finished!
[ Info: SubString{String}["1.7094305843004052", "9.2856356"]
Problem 4/12, algorithm 13/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:28
Finished!
[ Info: SubString{String}["1.7094305682408737", "9.1506415"]
Problem 5/12, algorithm 1/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:52
Finished!
[ Info: SubString{String}["1.255145077130398", "30.3665841"]
Problem 5/12, algorithm 2/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:56
Finished!
[ Info: SubString{String}["1.255145070937713", "33.8677304"]
Problem 5/12, algorithm 3/13 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 5/12, algorithm 4/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.2551449846482652", "8.7806062"]
Problem 5/12, algorithm 5/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:29
Finished!
[ Info: SubString{String}["1.255144970810816", "8.5387358"]
Problem 5/12, algorithm 6/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.4999999995692301", "8.8869137"]
Problem 5/12, algorithm 7/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:31
Finished!
[ Info: SubString{String}["1.4999999995692301", "8.8707072"]
Problem 5/12, algorithm 8/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:29
Finished!
[ Info: SubString{String}["1.4999999995692301", "8.9047032"]
Problem 5/12, algorithm 9/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:29
Finished!
[ Info: SubString{String}["1.2551450482354998", "8.4399617"]
Problem 5/12, algorithm 10/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.2551449788825657", "8.3335215"]
Problem 5/12, algorithm 11/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:31
Finished!
[ Info: SubString{String}["1.499999997301028", "8.699171"]
Problem 5/12, algorithm 12/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:29
Finished!
[ Info: SubString{String}["1.499999997301028", "8.6579571"]
Problem 5/12, algorithm 13/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:29
Finished!
[ Info: SubString{String}["1.499999997301028", "8.7902263"]
Problem 6/12, algorithm 1/13 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 6/12, algorithm 2/13 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 6/12, algorithm 3/13 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 6/12, algorithm 4/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:29
Finished!
[ Info: SubString{String}["1.471036475781681", "8.8248976"]
Problem 6/12, algorithm 5/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:28
Finished!
[ Info: SubString{String}["1.4710363761428709", "8.5505169"]
Problem 6/12, algorithm 6/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.5562851836654978", "9.5011555"]
Problem 6/12, algorithm 7/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:29
Finished!
[ Info: SubString{String}["1.5562851836654978", "9.4520119"]
Problem 6/12, algorithm 8/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:29
Finished!
[ Info: SubString{String}["1.5562851836654978", "9.4534974"]
Problem 6/12, algorithm 9/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:28
Finished!
[ Info: SubString{String}["1.4710366265761445", "8.4558049"]
Problem 6/12, algorithm 10/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:28
Finished!
[ Info: SubString{String}["1.4710362238910173", "8.2458754"]
Problem 6/12, algorithm 11/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.5562846392846454", "9.6662718"]
Problem 6/12, algorithm 12/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:29
Finished!
[ Info: SubString{String}["1.5562846392846454", "9.107038"]
Problem 6/12, algorithm 13/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.5562846392846454", "9.0146762"]
Problem 7/12, algorithm 1/13 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 7/12, algorithm 2/13 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 7/12, algorithm 3/13 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 7/12, algorithm 4/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:32
Finished!
[ Info: SubString{String}["1.7503204060173534", "8.8412297"]
Problem 7/12, algorithm 5/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:29
Finished!
[ Info: SubString{String}["1.750320341208066", "8.6292731"]
Problem 7/12, algorithm 6/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:31
Finished!
[ Info: SubString{String}["1.7763899365151006", "10.4054094"]
Problem 7/12, algorithm 7/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.7745147449590317", "10.4099024"]
Problem 7/12, algorithm 8/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.7763899365182758", "10.4019568"]
Problem 7/12, algorithm 9/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.7503204685792013", "8.8986134"]
Problem 7/12, algorithm 10/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:29
Finished!
[ Info: SubString{String}["1.7503217558950943", "8.3211818"]
Problem 7/12, algorithm 11/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:32
Finished!
[ Info: SubString{String}["1.7763899165999122", "10.916788"]
Problem 7/12, algorithm 12/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:31
Finished!
[ Info: SubString{String}["1.7745140186833968", "10.2178801"]
Problem 7/12, algorithm 13/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:31
Finished!
[ Info: SubString{String}["1.7744435625346329", "10.0230561"]
Problem 8/12, algorithm 1/13 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 8/12, algorithm 2/13 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 8/12, algorithm 3/13 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 8/12, algorithm 4/13 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 8/12, algorithm 5/13 100%|██████████████████████████████████████████████████████████████| Time: 0:05:07
Timed out!
Problem 8/12, algorithm 6/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:57
Finished!
[ Info: SubString{String}["2.7672182086517387", "32.2916971"]
Problem 8/12, algorithm 7/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:45
Finished!
[ Info: SubString{String}["2.7676240967276766", "24.1154537"]
Problem 8/12, algorithm 8/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:51
Finished!
[ Info: SubString{String}["2.7676240668569907", "30.4157592"]
Problem 8/12, algorithm 9/13 100%|██████████████████████████████████████████████████████████████| Time: 0:05:02
Timed out!
Problem 8/12, algorithm 10/13 100%|█████████████████████████████████████████████████████████████| Time: 0:05:02
Timed out!
Problem 8/12, algorithm 11/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:58
Finished!
[ Info: SubString{String}["2.7672184622684357", "35.6556466"]
Problem 8/12, algorithm 12/13 100%|█████████████████████████████████████████████████████████████| Time: 0:01:18
Finished!
[ Info: SubString{String}["2.7672181040034545", "54.6013249"]
Problem 8/12, algorithm 13/13 100%|█████████████████████████████████████████████████████████████| Time: 0:01:37
Finished!
[ Info: SubString{String}["2.7672047842754983", "75.4263761"]
Problem 9/12, algorithm 1/13 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 9/12, algorithm 2/13 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 9/12, algorithm 3/13 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 9/12, algorithm 4/13 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 9/12, algorithm 5/13 100%|██████████████████████████████████████████████████████████████| Time: 0:05:09
Timed out!
Problem 9/12, algorithm 6/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:59
Finished!
[ Info: SubString{String}["2.320856634278685", "34.2465309"]
Problem 9/12, algorithm 7/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:53
Finished!
[ Info: SubString{String}["2.320856772167582", "30.996122"]
Problem 9/12, algorithm 8/13 100%|██████████████████████████████████████████████████████████████| Time: 0:00:51
Finished!
[ Info: SubString{String}["2.3208567801177336", "30.2602784"]
Problem 9/12, algorithm 9/13 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 9/12, algorithm 10/13 100%|█████████████████████████████████████████████████████████████| Time: 0:05:02
Timed out!
Problem 9/12, algorithm 11/13 100%|█████████████████████████████████████████████████████████████| Time: 0:01:45
Finished!
[ Info: SubString{String}["2.320856591038295", "82.8359903"]
Problem 9/12, algorithm 12/13 100%|█████████████████████████████████████████████████████████████| Time: 0:01:13
Finished!
[ Info: SubString{String}["2.320856747573597", "50.7868997"]
Problem 9/12, algorithm 13/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:59
Finished!
[ Info: SubString{String}["2.3208566985412618", "37.362088"]
Problem 10/12, algorithm 1/13 100%|█████████████████████████████████████████████████████████████| Time: 0:05:02
Timed out!
Problem 10/12, algorithm 2/13 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 10/12, algorithm 3/13 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 10/12, algorithm 4/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:25
Finished!
┌ Error: ERROR: LoadError: OutOfMemoryError()
│ Stacktrace:
│ [1] Array at ./boot.jl:406 [inlined]
│ [2] Array at ./boot.jl:415 [inlined]
│ [3] similar at ./abstractarray.jl:675 [inlined]
│ [4] similar at ./abstractarray.jl:674 [inlined]
│ [5] _array_for at ./array.jl:678 [inlined]
│ [6] collect(::Base.Generator{UnitRange{Int64},GuessworkQuantumSideInfo.var"#10#15"{Int64}}) at ./array.jl:691
│ [7] guesswork(::Array{Float64,1}, ::Array{Array{Complex{Float64},2},1}; solver::MosekSolver, K::Int64, c::Array{Float64,1}, dual::Bool, remove_repetition::Bool, povm_outcomes::Combinatorics.MultiSetPermutations{Array{Int64,1}}, verbose::Bool, debug::Bool) at /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/src/SDP_formulation.jl:168
│ [8] macro expansion at ./timing.jl:310 [inlined]
│ [9] (::var"#23#28"{var"#21#26"})(::NamedTuple{(:p, :ρBs, :numeric_type, :problem),Tuple{Array{Float64,1},Array{Array{Complex{Float64},2},1},DataType,String}}) at /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/common.jl:79
│ [10] top-level scope at /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/do_problem.jl:20
│ [11] include(::Function, ::Module, ::String) at ./Base.jl:380
│ [12] include(::Module, ::String) at ./Base.jl:368
│ [13] exec_options(::Base.JLOptions) at ./client.jl:296
│ [14] _start() at ./client.jl:506
│ in expression starting at /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/do_problem.jl:20
└ @ Main /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/run_problems.jl:42
[ Info: SubString{String}[""]
ERROR: LoadError: BoundsError: attempt to access 1-element Array{SubString{String},1} at index [0:1]
Stacktrace:
[1] throw_boundserror(::Array{SubString{String},1}, ::Tuple{UnitRange{Int64}}) at ./abstractarray.jl:541
[2] checkbounds at ./abstractarray.jl:506 [inlined]
[3] getindex at ./array.jl:815 [inlined]
[4] run_problem(::Int64, ::Int64; verbose::Bool) at /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/run_problems.jl:51
[5] top-level scope at /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/run_problems.jl:61
[6] include(::String) at ./client.jl:457
[7] top-level scope at REPL[1]:1
in expression starting at /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/run_problems.jl:60
```
Added error handling so that a run producing no output (the `BoundsError` above) is logged and skipped instead of aborting the whole sweep (a rough sketch of the guard is below), and continued:
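The following is only an illustrative sketch of that guard; the function and variable names are assumptions, not the actual code in `run_problems.jl`.

```julia
# Illustrative sketch (assumed names, not the repo's actual implementation):
# read the last two entries of the captured output -- the optimal value and the
# elapsed seconds -- only when they are present; otherwise log an error and
# record NaNs so the sweep moves on to the next algorithm.
function parse_run_output(lines::Vector{<:AbstractString})
    if length(lines) < 2
        @error "Not enough returns, something went wrong"
        return (optval = NaN, elapsed_seconds = NaN)
    end
    optval          = parse(Float64, lines[end-1])  # second-to-last entry: optimal value
    elapsed_seconds = parse(Float64, lines[end])    # last entry: wall-clock time in seconds
    return (optval = optval, elapsed_seconds = elapsed_seconds)
end
```

With a guard of this kind, the out-of-memory cases in the rerun below show up as `Not enough returns, something went wrong` and the loop simply continues to the next algorithm.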
```
julia> include("run_problems.jl")
Problem 10/12, algorithm 1/13 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 10/12, algorithm 2/13 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 10/12, algorithm 3/13 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 10/12, algorithm 4/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:25
Finished!
┌ Error: ERROR: LoadError: OutOfMemoryError()
│ Stacktrace:
│ [1] Array at ./boot.jl:406 [inlined]
│ [2] Array at ./boot.jl:415 [inlined]
│ [3] similar at ./abstractarray.jl:675 [inlined]
│ [4] similar at ./abstractarray.jl:674 [inlined]
│ [5] _array_for at ./array.jl:678 [inlined]
│ [6] collect(::Base.Generator{UnitRange{Int64},GuessworkQuantumSideInfo.var"#10#15"{Int64}}) at ./array.jl:691
│ [7] guesswork(::Array{Float64,1}, ::Array{Array{Complex{Float64},2},1}; solver::MosekSolver, K::Int64, c::Array{Float64,1}, dual::Bool, remove_repetition::Bool, povm_outcomes::Combinatorics.MultiSetPermutations{Array{Int64,1}}, verbose::Bool, debug::Bool) at /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/src/SDP_formulation.jl:168
│ [8] macro expansion at ./timing.jl:310 [inlined]
│ [9] (::var"#23#28"{var"#21#26"})(::NamedTuple{(:p, :ρBs, :numeric_type, :problem),Tuple{Array{Float64,1},Array{Array{Complex{Float64},2},1},DataType,String}}) at /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/common.jl:79
│ [10] top-level scope at /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/do_problem.jl:20
│ [11] include(::Function, ::Module, ::String) at ./Base.jl:380
│ [12] include(::Module, ::String) at ./Base.jl:368
│ [13] exec_options(::Base.JLOptions) at ./client.jl:296
│ [14] _start() at ./client.jl:506
│ in expression starting at /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/do_problem.jl:20
└ @ Main /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/run_problems.jl:42
[ Info: SubString{String}[""]
┌ Error: Not enough returns, something went wrong
└ @ Main /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/run_problems.jl:52
Problem 10/12, algorithm 5/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:24
Finished!
┌ Error: ERROR: LoadError: OutOfMemoryError()
│ Stacktrace:
│ [1] Array at ./boot.jl:406 [inlined]
│ [2] _array_for at ./array.jl:677 [inlined]
│ [3] guesswork(::Array{Float64,1}, ::Array{Array{Complex{Float64},2},1}; solver::MosekSolver, K::Int64, c::Array{Float64,1}, dual::Bool, remove_repetition::Bool, povm_outcomes::Combinatorics.MultiSetPermutations{Array{Int64,1}}, verbose::Bool, debug::Bool) at /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/src/SDP_formulation.jl:143
│ [4] macro expansion at ./timing.jl:310 [inlined]
│ [5] (::var"#24#29"{var"#21#26"})(::NamedTuple{(:p, :ρBs, :numeric_type, :problem),Tuple{Array{Float64,1},Array{Array{Complex{Float64},2},1},DataType,String}}) at /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/common.jl:82
│ [6] top-level scope at /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/do_problem.jl:20
│ [7] include(::Function, ::Module, ::String) at ./Base.jl:380
│ [8] include(::Module, ::String) at ./Base.jl:368
│ [9] exec_options(::Base.JLOptions) at ./client.jl:296
│ [10] _start() at ./client.jl:506
│ in expression starting at /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/do_problem.jl:20
└ @ Main /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/run_problems.jl:42
[ Info: SubString{String}[""]
┌ Error: Not enough returns, something went wrong
└ @ Main /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/run_problems.jl:52
Problem 10/12, algorithm 6/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:47
Finished!
[ Info: SubString{String}["3.7811860979433263", "27.7171467"]
Problem 10/12, algorithm 7/13 100%|█████████████████████████████████████████████████████████████| Time: 0:01:29
Finished!
[ Info: SubString{String}["3.771192992150866", "66.469113"]
Problem 10/12, algorithm 8/13 100%|█████████████████████████████████████████████████████████████| Time: 0:02:35
Finished!
[ Info: SubString{String}["3.7682775167884057", "133.8114008"]
Problem 10/12, algorithm 9/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:24
Finished!
┌ Error: ERROR: LoadError: OutOfMemoryError()
│ Stacktrace:
│ [1] Array at ./boot.jl:406 [inlined]
│ [2] Array at ./boot.jl:415 [inlined]
│ [3] similar at ./abstractarray.jl:675 [inlined]
│ [4] similar at ./abstractarray.jl:674 [inlined]
│ [5] _array_for at ./array.jl:678 [inlined]
│ [6] collect(::Base.Generator{UnitRange{Int64},GuessworkQuantumSideInfo.var"#10#15"{Int64}}) at ./array.jl:691
│ [7] guesswork(::Array{Float64,1}, ::Array{Array{Complex{Float64},2},1}; solver::SCSSolver, K::Int64, c::Array{Float64,1}, dual::Bool, remove_repetition::Bool, povm_outcomes::Combinatorics.MultiSetPermutations{Array{Int64,1}}, verbose::Bool, debug::Bool) at /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/src/SDP_formulation.jl:168
│ [8] macro expansion at ./timing.jl:310 [inlined]
│ [9] (::var"#23#28"{var"#22#27"})(::NamedTuple{(:p, :ρBs, :numeric_type, :problem),Tuple{Array{Float64,1},Array{Array{Complex{Float64},2},1},DataType,String}}) at /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/common.jl:79
│ [10] top-level scope at /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/do_problem.jl:20
│ [11] include(::Function, ::Module, ::String) at ./Base.jl:380
│ [12] include(::Module, ::String) at ./Base.jl:368
│ [13] exec_options(::Base.JLOptions) at ./client.jl:296
│ [14] _start() at ./client.jl:506
│ in expression starting at /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/do_problem.jl:20
└ @ Main /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/run_problems.jl:42
[ Info: SubString{String}[""]
┌ Error: Not enough returns, something went wrong
└ @ Main /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/run_problems.jl:52
Problem 10/12, algorithm 10/13 100%|████████████████████████████████████████████████████████████| Time: 0:00:24
Finished!
┌ Error: ERROR: LoadError: OutOfMemoryError()
│ Stacktrace:
│ [1] Array at ./boot.jl:406 [inlined]
│ [2] _array_for at ./array.jl:677 [inlined]
│ [3] guesswork(::Array{Float64,1}, ::Array{Array{Complex{Float64},2},1}; solver::SCSSolver, K::Int64, c::Array{Float64,1}, dual::Bool, remove_repetition::Bool, povm_outcomes::Combinatorics.MultiSetPermutations{Array{Int64,1}}, verbose::Bool, debug::Bool) at /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/src/SDP_formulation.jl:143
│ [4] macro expansion at ./timing.jl:310 [inlined]
│ [5] (::var"#24#29"{var"#22#27"})(::NamedTuple{(:p, :ρBs, :numeric_type, :problem),Tuple{Array{Float64,1},Array{Array{Complex{Float64},2},1},DataType,String}}) at /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/common.jl:82
│ [6] top-level scope at /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/do_problem.jl:20
│ [7] include(::Function, ::Module, ::String) at ./Base.jl:380
│ [8] include(::Module, ::String) at ./Base.jl:368
│ [9] exec_options(::Base.JLOptions) at ./client.jl:296
│ [10] _start() at ./client.jl:506
│ in expression starting at /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/do_problem.jl:20
└ @ Main /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/run_problems.jl:42
[ Info: SubString{String}[""]
┌ Error: Not enough returns, something went wrong
└ @ Main /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/run_problems.jl:52
Problem 10/12, algorithm 11/13 100%|████████████████████████████████████████████████████████████| Time: 0:00:49
Finished!
[ Info: SubString{String}["3.779617362857275", "28.760714"]
Problem 10/12, algorithm 12/13 100%|████████████████████████████████████████████████████████████| Time: 0:01:32
Finished!
[ Info: SubString{String}["3.7715124469415007", "70.5699564"]
Problem 10/12, algorithm 13/13 100%|████████████████████████████████████████████████████████████| Time: 0:03:22
Finished!
[ Info: SubString{String}["3.768213428570261", "177.6444008"]
Problem 11/12, algorithm 1/13 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 11/12, algorithm 2/13 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 11/12, algorithm 3/13 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 11/12, algorithm 4/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.7368579207940624", "9.2295285"]
Problem 11/12, algorithm 5/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:29
Finished!
[ Info: SubString{String}["1.7368579132221822", "8.8348357"]
Problem 11/12, algorithm 6/13 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 11/12, algorithm 7/13 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 11/12, algorithm 8/13 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 11/12, algorithm 9/13 100%|█████████████████████████████████████████████████████████████| Time: 0:00:32
Finished!
[ Info: SubString{String}["1.7368580657916646", "11.2161223"]
Problem 11/12, algorithm 10/13 100%|████████████████████████████████████████████████████████████| Time: 0:00:30
Finished!
[ Info: SubString{String}["1.7368575755119444", "9.1961901"]
Problem 11/12, algorithm 11/13 100%|████████████████████████████████████████████████████████████| Time: 0:02:02
Finished!
[ Info: SubString{String}["1.7554742953569498", "101.0598171"]
Problem 11/12, algorithm 12/13 100%|████████████████████████████████████████████████████████████| Time: 0:00:53
Finished!
[ Info: SubString{String}["1.7578147992900353", "32.9359894"]
Problem 11/12, algorithm 13/13 100%|████████████████████████████████████████████████████████████| Time: 0:00:44
Finished!
[ Info: SubString{String}["1.7583289930168684", "22.6860797"]
Problem 12/12, algorithm 1/13 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 12/12, algorithm 2/13 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 12/12, algorithm 3/13 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 12/12, algorithm 4/13 100%|█████████████████████████████████████████████████████████████| Time: 0:04:09
Finished!
[ Info: SubString{String}[""]
┌ Error: Not enough returns, something went wrong
└ @ Main /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/run_problems.jl:52
Problem 12/12, algorithm 5/13 100%|█████████████████████████████████████████████████████████████| Time: 0:05:02
Timed out!
Problem 12/12, algorithm 6/13 100%|█████████████████████████████████████████████████████████████| Time: 0:05:02
Timed out!
Problem 12/12, algorithm 7/13 100%|█████████████████████████████████████████████████████████████| Time: 0:05:02
Timed out!
Problem 12/12, algorithm 8/13 100%|█████████████████████████████████████████████████████████████| Time: 0:05:02
Timed out!
Problem 12/12, algorithm 9/13 100%|█████████████████████████████████████████████████████████████| Time: 0:03:33
Finished!
[ Info: SubString{String}[""]
┌ Error: Not enough returns, something went wrong
└ @ Main /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/run_problems.jl:52
Problem 12/12, algorithm 10/13 100%|████████████████████████████████████████████████████████████| Time: 0:05:03
Timed out!
Problem 12/12, algorithm 11/13 100%|████████████████████████████████████████████████████████████| Time: 0:01:47
Finished!
[ Info: SubString{String}["2.6455718400755233", "84.4330259"]
Problem 12/12, algorithm 12/13 100%|████████████████████████████████████████████████████████████| Time: 0:02:56
Finished!
[ Info: SubString{String}["2.639131319680283", "155.5140639"]
Problem 12/12, algorithm 13/13 100%|████████████████████████████████████████████████████████████| Time: 0:05:03
Timed out!
39×8 DataFrame. Omitted printing of 4 columns
│ Row │ algo │ settings │ numeric_type │ problem │
│ │ String │ String │ String │ String │
├─────┼─────────────────────────────────────┼────────────────────────────────────┼──────────────┼─────────────┤
│ 1 │ MISDP │ Pajarito(Mosek, Gurobi, MSD=false) │ Float64 │ BB84(2) │
│ 2 │ MISDP │ Pajarito(Mosek, Gurobi, MSD=true) │ Float64 │ BB84(2) │
│ 3 │ MISDP │ Pajarito(SCS, Cbc, MSD=false) │ Float64 │ BB84(2) │
│ 4 │ SDP │ Mosek │ Float64 │ BB84(2) │
│ 5 │ dual_SDP │ Mosek │ Float64 │ BB84(2) │
│ 6 │ guesswork_upper_bound(max_time=20) │ Mosek │ Float64 │ BB84(2) │
│ 7 │ guesswork_upper_bound(max_time=60) │ Mosek │ Float64 │ BB84(2) │
│ 8 │ guesswork_upper_bound(max_time=240) │ Mosek │ Float64 │ BB84(2) │
│ 9 │ SDP │ SCS │ Float64 │ BB84(2) │
│ 10 │ dual_SDP │ SCS │ Float64 │ BB84(2) │
⋮
│ 29 │ MISDP │ Pajarito(SCS, Cbc, MSD=false) │ Float64 │ 3qutrits(2) │
│ 30 │ SDP │ Mosek │ Float64 │ 3qutrits(2) │
│ 31 │ dual_SDP │ Mosek │ Float64 │ 3qutrits(2) │
│ 32 │ guesswork_upper_bound(max_time=20) │ Mosek │ Float64 │ 3qutrits(2) │
│ 33 │ guesswork_upper_bound(max_time=60) │ Mosek │ Float64 │ 3qutrits(2) │
│ 34 │ guesswork_upper_bound(max_time=240) │ Mosek │ Float64 │ 3qutrits(2) │
│ 35 │ SDP │ SCS │ Float64 │ 3qutrits(2) │
│ 36 │ dual_SDP │ SCS │ Float64 │ 3qutrits(2) │
│ 37 │ guesswork_upper_bound(max_time=20) │ SCS │ Float64 │ 3qutrits(2) │
│ 38 │ guesswork_upper_bound(max_time=60) │ SCS │ Float64 │ 3qutrits(2) │
│ 39 │ guesswork_upper_bound(max_time=240) │ SCS │ Float64 │ 3qutrits(2) │
```
Added `MISDP_dB` setting and ran the problems with those options:
```
julia> include("run_problems.jl")
Problem 1/12, algorithm 1/16 100%|██████████████████████████████████████████████████████████████| Time: 0:00:48
Finished!
[ Info: SubString{String}["1.2549350503546497", "22.9851179"]
Problem 1/12, algorithm 3/16 100%|██████████████████████████████████████████████████████████████| Time: 0:00:49
Finished!
[ Info: SubString{String}["1.2549350503546497", "23.6271632"]
Problem 1/12, algorithm 5/16 100%|██████████████████████████████████████████████████████████████| Time: 0:00:48
Finished!
[ Info: SubString{String}["1.2549359025427322", "23.4738548"]
Problem 2/12, algorithm 1/16 100%|██████████████████████████████████████████████████████████████| Time: 0:00:46
Finished!
[ Info: SubString{String}["1.531583578752242", "23.1706345"]
Problem 2/12, algorithm 3/16 100%|██████████████████████████████████████████████████████████████| Time: 0:00:48
Finished!
[ Info: SubString{String}["1.5315835787522418", "23.630824"]
Problem 2/12, algorithm 5/16 100%|██████████████████████████████████████████████████████████████| Time: 0:00:52
Finished!
[ Info: SubString{String}["1.5315876003496776", "27.1068016"]
Problem 3/12, algorithm 1/16 100%|██████████████████████████████████████████████████████████████| Time: 0:00:48
Finished!
┌ Warning: MOSEK warning 705: #18 (nearly) zero elements are specified in sparse row ''(0) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(617) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(618) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(619) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(620) of matrix 'A'.
│ MOSEK warning 705: #18 (nearly) zero elements are specified in sparse row ''(0) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(629) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(630) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(631) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(632) of matrix 'A'.
└ @ Main /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/run_problems.jl:47
[ Info: SubString{String}["MOSEK warning 705: #18 (nearly) zero elements are specified in sparse row ''(0) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(617) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(618) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(619) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(620) of matrix 'A'.", "MOSEK warning 705: #18 (nearly) zero elements are specified in sparse row ''(0) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(629) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(630) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(631) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(632) of matrix 'A'.", "1.4226497495723145", "23.2142919"]
Problem 3/12, algorithm 3/16 100%|██████████████████████████████████████████████████████████████| Time: 0:00:49
Finished!
┌ Warning: MOSEK warning 705: #18 (nearly) zero elements are specified in sparse row ''(0) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(617) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(618) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(619) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(620) of matrix 'A'.
│ MOSEK warning 705: #18 (nearly) zero elements are specified in sparse row ''(0) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(629) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(630) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(631) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(632) of matrix 'A'.
└ @ Main /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/run_problems.jl:47
[ Info: SubString{String}["MOSEK warning 705: #18 (nearly) zero elements are specified in sparse row ''(0) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(617) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(618) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(619) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(620) of matrix 'A'.", "MOSEK warning 705: #18 (nearly) zero elements are specified in sparse row ''(0) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(629) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(630) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(631) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(632) of matrix 'A'.", "1.4226497297957217", "23.5631458"]
Problem 3/12, algorithm 5/16 100%|██████████████████████████████████████████████████████████████| Time: 0:00:49
Finished!
┌ Error: ┌ Warning: Repeated integer solution without converging
│ └ @ Pajarito ~/.julia/packages/Pajarito/TFExZ/src/conic_algorithm.jl:1687
└ @ Main /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/run_problems.jl:42
[ Info: SubString{String}["1.422648765315336", "26.1466155"]
Problem 4/12, algorithm 1/16 100%|██████████████████████████████████████████████████████████████| Time: 0:00:49
Finished!
[ Info: SubString{String}["1.7094305955725198", "25.0106493"]
Problem 4/12, algorithm 3/16 100%|██████████████████████████████████████████████████████████████| Time: 0:00:47
Finished!
[ Info: SubString{String}["1.7094305955725198", "23.5142924"]
Problem 4/12, algorithm 5/16 100%|██████████████████████████████████████████████████████████████| Time: 0:01:39
Finished!
[ Info: SubString{String}["1.7094293534710712", "73.8001398"]
Problem 5/12, algorithm 1/16 100%|██████████████████████████████████████████████████████████████| Time: 0:00:50
Finished!
[ Info: SubString{String}["1.255145017108577", "23.3100727"]
Problem 5/12, algorithm 3/16 100%|██████████████████████████████████████████████████████████████| Time: 0:00:50
Finished!
[ Info: SubString{String}["1.255145017108577", "23.596305"]
Problem 5/12, algorithm 5/16 100%|██████████████████████████████████████████████████████████████| Time: 0:00:49
Finished!
[ Info: SubString{String}["1.2551468576010365", "24.7745932"]
Problem 6/12, algorithm 1/16 100%|██████████████████████████████████████████████████████████████| Time: 0:00:49
Finished!
[ Info: SubString{String}["1.4710364763587662", "26.4991661"]
Problem 6/12, algorithm 3/16 100%|██████████████████████████████████████████████████████████████| Time: 0:00:48
Finished!
[ Info: SubString{String}["1.4710364763587662", "24.4914356"]
Problem 6/12, algorithm 5/16 100%|██████████████████████████████████████████████████████████████| Time: 0:01:57
Finished!
┌ Error: ┌ Warning: Repeated integer solution without converging
│ └ @ Pajarito ~/.julia/packages/Pajarito/TFExZ/src/conic_algorithm.jl:1687
└ @ Main /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/run_problems.jl:42
[ Info: SubString{String}["1.4710321441071241", "95.0050658"]
Problem 7/12, algorithm 1/16 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 7/12, algorithm 3/16 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 7/12, algorithm 5/16 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 8/12, algorithm 1/16 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 8/12, algorithm 3/16 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 8/12, algorithm 5/16 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 9/12, algorithm 1/16 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 9/12, algorithm 3/16 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 9/12, algorithm 5/16 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 10/12, algorithm 1/16 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 10/12, algorithm 3/16 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 10/12, algorithm 5/16 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 11/12, algorithm 1/16 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 11/12, algorithm 3/16 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 11/12, algorithm 5/16 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 12/12, algorithm 1/16 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 12/12, algorithm 3/16 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 12/12, algorithm 5/16 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
36×8 DataFrame. Omitted printing of 2 columns
│ Row │ algo │ settings │ numeric_type │ problem │ optval │ elapsed_seconds │
│ │ String │ String │ String │ String │ Float64 │ Float64 │
├─────┼──────────┼────────────────────────────────────┼──────────────┼─────────────┼─────────┼─────────────────┤
│ 1 │ MISDP_dB │ Pajarito(Mosek, Gurobi, MSD=false) │ Float64 │ 2qubits(1) │ 1.25494 │ 22.9851 │
│ 2 │ MISDP_dB │ Pajarito(Mosek, Gurobi, MSD=true) │ Float64 │ 2qubits(1) │ 1.25494 │ 23.6272 │
│ 3 │ MISDP_dB │ Pajarito(SCS, Cbc, MSD=false) │ Float64 │ 2qubits(1) │ 1.25494 │ 23.4739 │
│ 4 │ MISDP_dB │ Pajarito(Mosek, Gurobi, MSD=false) │ Float64 │ 3qubits(1) │ 1.53158 │ 23.1706 │
│ 5 │ MISDP_dB │ Pajarito(Mosek, Gurobi, MSD=true) │ Float64 │ 3qubits(1) │ 1.53158 │ 23.6308 │
│ 6 │ MISDP_dB │ Pajarito(SCS, Cbc, MSD=false) │ Float64 │ 3qubits(1) │ 1.53159 │ 27.1068 │
│ 7 │ MISDP_dB │ Pajarito(Mosek, Gurobi, MSD=false) │ Float64 │ Y(1) │ 1.42265 │ 23.2143 │
│ 8 │ MISDP_dB │ Pajarito(Mosek, Gurobi, MSD=true) │ Float64 │ Y(1) │ 1.42265 │ 23.5631 │
│ 9 │ MISDP_dB │ Pajarito(SCS, Cbc, MSD=false) │ Float64 │ Y(1) │ 1.42265 │ 26.1466 │
│ 10 │ MISDP_dB │ Pajarito(Mosek, Gurobi, MSD=false) │ Float64 │ BB84(1) │ 1.70943 │ 25.0106 │
⋮
│ 26 │ MISDP_dB │ Pajarito(Mosek, Gurobi, MSD=true) │ Float64 │ Y(2) │ NaN │ NaN │
│ 27 │ MISDP_dB │ Pajarito(SCS, Cbc, MSD=false) │ Float64 │ Y(2) │ NaN │ NaN │
│ 28 │ MISDP_dB │ Pajarito(Mosek, Gurobi, MSD=false) │ Float64 │ BB84(2) │ NaN │ NaN │
│ 29 │ MISDP_dB │ Pajarito(Mosek, Gurobi, MSD=true) │ Float64 │ BB84(2) │ NaN │ NaN │
│ 30 │ MISDP_dB │ Pajarito(SCS, Cbc, MSD=false) │ Float64 │ BB84(2) │ NaN │ NaN │
│ 31 │ MISDP_dB │ Pajarito(Mosek, Gurobi, MSD=false) │ Float64 │ 2qutrits(2) │ NaN │ NaN │
│ 32 │ MISDP_dB │ Pajarito(Mosek, Gurobi, MSD=true) │ Float64 │ 2qutrits(2) │ NaN │ NaN │
│ 33 │ MISDP_dB │ Pajarito(SCS, Cbc, MSD=false) │ Float64 │ 2qutrits(2) │ NaN │ NaN │
│ 34 │ MISDP_dB │ Pajarito(Mosek, Gurobi, MSD=false) │ Float64 │ 3qutrits(2) │ NaN │ NaN │
│ 35 │ MISDP_dB │ Pajarito(Mosek, Gurobi, MSD=true) │ Float64 │ 3qutrits(2) │ NaN │ NaN │
│ 36 │ MISDP_dB │ Pajarito(SCS, Cbc, MSD=false) │ Float64 │ 3qutrits(2) │ NaN │ NaN │
```
Reran the MISDP problems with `M = dB^2` (instead of `dB^2 + 1`, which is not
necessary; see the small sketch of that outcome count below). Annoyingly, the
algorithm indices in the following log are off, because I had removed the
`MISDP` version from `algos`. That doesn't matter, though, since there were no
OOM errors that needed to be pulled out of the logs.
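A quick, purely illustrative snippet of that outcome-count choice (the loop variable is just for the sketch):

```julia
# Illustrative arithmetic only: POVM outcome counts for the MISDP rerun.
for dB in (2, 3)          # e.g. qubit- or qutrit-dimensional side information
    M_new = dB^2          # number of outcomes used in this rerun
    M_old = dB^2 + 1      # previous, unnecessarily large choice
    println("dB = $dB: M = $M_new (previously $M_old)")
end
```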
```julia
GuessworkQuantumSideInfo.jl/numeric_experiments on numeric_experiments [$»!]
13:27:14 ❯ julia --project=. --startup-file=no
_
_ _ _(_)_ | Documentation: https://docs.julialang.org
(_) | (_) (_) |
_ _ _| |_ __ _ | Type "?" for help, "]?" for Pkg help.
| | | | | | |/ _` | |
| | |_| | | | (_| | | Version 1.5.1 (2020-08-25)
_/ |\__'_|_|_|\__'_| | Official https://julialang.org/ release
|__/ |
julia> include("run_problems.jl")
[ Info: Precompiling CSV [336ed68f-0bac-5ca0-87d4-7b16caf5d00b]
[ Info: Precompiling GuessworkQuantumSideInfo [43bf2e5e-2e2b-4a4c-a80a-78ff5d909e07]
[ Info: Precompiling Pajarito [2f354839-79df-5901-9f0a-cdb2aac6fe30]
Academic license - for non-commercial use only
Problem 1/12, algorithm 2/16 100%|██████████████████████████████████████████████████████████████| Time: 0:00:49
Finished!
[ Info: SubString{String}["1.2549350272184832", "25.0180601"]
Problem 1/12, algorithm 4/16 100%|██████████████████████████████████████████████████████████████| Time: 0:00:47
Finished!
[ Info: SubString{String}["1.2549349960643177", "24.4915273"]
Problem 1/12, algorithm 6/16 100%|██████████████████████████████████████████████████████████████| Time: 0:00:50
Finished!
[ Info: SubString{String}["1.2549344493592418", "26.5418423"]
Problem 2/12, algorithm 2/16 100%|██████████████████████████████████████████████████████████████| Time: 0:00:50
Finished!
[ Info: SubString{String}["1.5315837236968899", "26.384784"]
Problem 2/12, algorithm 4/16 100%|██████████████████████████████████████████████████████████████| Time: 0:00:50
Finished!
[ Info: SubString{String}["1.531583569528324", "25.8218552"]
Problem 2/12, algorithm 6/16 100%|██████████████████████████████████████████████████████████████| Time: 0:02:49
Finished!
┌ Error: ┌ Warning: Repeated integer solution without converging
│ └ @ Pajarito ~/.julia/packages/Pajarito/TFExZ/src/conic_algorithm.jl:1687
└ @ Main /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/run_problems.jl:42
[ Info: SubString{String}["1.5315809630022876", "144.9346841"]
Problem 3/12, algorithm 2/16 100%|██████████████████████████████████████████████████████████████| Time: 0:00:52
Finished!
┌ Warning: MOSEK warning 705: #36 (nearly) zero elements are specified in sparse row ''(0) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1225) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1226) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1227) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1228) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1229) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1230) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1231) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1232) of matrix 'A'.
│ MOSEK warning 705: #36 (nearly) zero elements are specified in sparse row ''(0) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1249) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1250) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1251) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1252) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1253) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1254) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1255) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1256) of matrix 'A'.
└ @ Main /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/run_problems.jl:47
[ Info: SubString{String}["MOSEK warning 705: #36 (nearly) zero elements are specified in sparse row ''(0) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1225) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1226) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1227) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1228) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1229) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1230) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1231) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1232) of matrix 'A'.", "MOSEK warning 705: #36 (nearly) zero elements are specified in sparse row ''(0) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1249) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1250) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1251) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1252) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1253) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1254) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1255) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1256) of matrix 'A'.", "1.4226497385782253", "29.0800987"]
Problem 3/12, algorithm 4/16 100%|██████████████████████████████████████████████████████████████| Time: 0:00:49
Finished!
┌ Warning: MOSEK warning 705: #36 (nearly) zero elements are specified in sparse row ''(0) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1225) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1226) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1227) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1228) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1229) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1230) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1231) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1232) of matrix 'A'.
│ MOSEK warning 705: #36 (nearly) zero elements are specified in sparse row ''(0) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1249) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1250) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1251) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1252) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1253) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1254) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1255) of matrix 'A'.
│ MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1256) of matrix 'A'.
└ @ Main /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/run_problems.jl:47
[ Info: SubString{String}["MOSEK warning 705: #36 (nearly) zero elements are specified in sparse row ''(0) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1225) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1226) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1227) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1228) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1229) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1230) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1231) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1232) of matrix 'A'.", "MOSEK warning 705: #36 (nearly) zero elements are specified in sparse row ''(0) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1249) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1250) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1251) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1252) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1253) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1254) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1255) of matrix 'A'.", "MOSEK warning 705: #6 (nearly) zero elements are specified in sparse row ''(1256) of matrix 'A'.", "1.4226497372005802", "26.2719686"]
Problem 3/12, algorithm 6/16 100%|██████████████████████████████████████████████████████████████| Time: 0:02:44
Finished!
┌ Error: ┌ Warning: Repeated integer solution without converging
│ └ @ Pajarito ~/.julia/packages/Pajarito/TFExZ/src/conic_algorithm.jl:1687
└ @ Main /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/run_problems.jl:42
[ Info: SubString{String}["1.4226490196466606", "141.1404839"]
Problem 4/12, algorithm 2/16 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 4/12, algorithm 4/16 100%|██████████████████████████████████████████████████████████████| Time: 0:01:33
Finished!
[ Info: SubString{String}["1.7094305493549533", "68.3456832"]
Problem 4/12, algorithm 6/16 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 5/12, algorithm 2/16 100%|██████████████████████████████████████████████████████████████| Time: 0:00:54
Finished!
[ Info: SubString{String}["1.2551450448490824", "31.3867835"]
Problem 5/12, algorithm 4/16 100%|██████████████████████████████████████████████████████████████| Time: 0:00:57
Finished!
[ Info: SubString{String}["1.255145044849353", "31.3980755"]
Problem 5/12, algorithm 6/16 100%|██████████████████████████████████████████████████████████████| Time: 0:03:59
Finished!
┌ Error: ┌ Warning: Repeated integer solution without converging
│ └ @ Pajarito ~/.julia/packages/Pajarito/TFExZ/src/conic_algorithm.jl:1687
└ @ Main /mnt/c/Users/eric/Code/GuessworkQuantumSideInfo.jl/numeric_experiments/run_problems.jl:42
[ Info: SubString{String}["1.2551424930051753", "212.7907068"]
Problem 6/12, algorithm 2/16 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 6/12, algorithm 4/16 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 6/12, algorithm 6/16 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 7/12, algorithm 2/16 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 7/12, algorithm 4/16 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 7/12, algorithm 6/16 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 8/12, algorithm 2/16 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 8/12, algorithm 4/16 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 8/12, algorithm 6/16 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 9/12, algorithm 2/16 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 9/12, algorithm 4/16 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 9/12, algorithm 6/16 100%|██████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 10/12, algorithm 2/16 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 10/12, algorithm 4/16 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 10/12, algorithm 6/16 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 11/12, algorithm 2/16 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 11/12, algorithm 4/16 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 11/12, algorithm 6/16 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 12/12, algorithm 2/16 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 12/12, algorithm 4/16 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
Problem 12/12, algorithm 6/16 100%|█████████████████████████████████████████████████████████████| Time: 0:05:01
Timed out!
36×8 DataFrame. Omitted printing of 2 columns
│ Row │ algo │ settings │ numeric_type │ problem │ optval │ elapsed_seconds │
│ │ String │ String │ String │ String │ Float64 │ Float64 │
├─────┼──────────────┼────────────────────────────────────┼──────────────┼─────────────┼─────────┼─────────────────┤
│ 1 │ MISDP (dB^2) │ Pajarito(Mosek, Gurobi, MSD=false) │ Float64 │ 2qubits(1) │ 1.25494 │ 25.0181 │
│ 2 │ MISDP (dB^2) │ Pajarito(Mosek, Gurobi, MSD=true) │ Float64 │ 2qubits(1) │ 1.25493 │ 24.4915 │
│ 3 │ MISDP (dB^2) │ Pajarito(SCS, Cbc, MSD=false) │ Float64 │ 2qubits(1) │ 1.25493 │ 26.5418 │
│ 4 │ MISDP (dB^2) │ Pajarito(Mosek, Gurobi, MSD=false) │ Float64 │ 3qubits(1) │ 1.53158 │ 26.3848 │
│ 5 │ MISDP (dB^2) │ Pajarito(Mosek, Gurobi, MSD=true) │ Float64 │ 3qubits(1) │ 1.53158 │ 25.8219 │
│ 6 │ MISDP (dB^2) │ Pajarito(SCS, Cbc, MSD=false) │ Float64 │ 3qubits(1) │ 1.53158 │ 144.935 │
│ 7 │ MISDP (dB^2) │ Pajarito(Mosek, Gurobi, MSD=false) │ Float64 │ Y(1) │ 1.42265 │ 29.0801 │
│ 8 │ MISDP (dB^2) │ Pajarito(Mosek, Gurobi, MSD=true) │ Float64 │ Y(1) │ 1.42265 │ 26.272 │
│ 9 │ MISDP (dB^2) │ Pajarito(SCS, Cbc, MSD=false) │ Float64 │ Y(1) │ 1.42265 │ 141.14 │
│ 10 │ MISDP (dB^2) │ Pajarito(Mosek, Gurobi, MSD=false) │ Float64 │ BB84(1) │ NaN │ NaN │
⋮
│ 26 │ MISDP (dB^2) │ Pajarito(Mosek, Gurobi, MSD=true) │ Float64 │ Y(2) │ NaN │ NaN │
│ 27 │ MISDP (dB^2) │ Pajarito(SCS, Cbc, MSD=false) │ Float64 │ Y(2) │ NaN │ NaN │
│ 28 │ MISDP (dB^2) │ Pajarito(Mosek, Gurobi, MSD=false) │ Float64 │ BB84(2) │ NaN │ NaN │
│ 29 │ MISDP (dB^2) │ Pajarito(Mosek, Gurobi, MSD=true) │ Float64 │ BB84(2) │ NaN │ NaN │
│ 30 │ MISDP (dB^2) │ Pajarito(SCS, Cbc, MSD=false) │ Float64 │ BB84(2) │ NaN │ NaN │
│ 31 │ MISDP (dB^2) │ Pajarito(Mosek, Gurobi, MSD=false) │ Float64 │ 2qutrits(2) │ NaN │ NaN │
│ 32 │ MISDP (dB^2) │ Pajarito(Mosek, Gurobi, MSD=true) │ Float64 │ 2qutrits(2) │ NaN │ NaN │
│ 33 │ MISDP (dB^2) │ Pajarito(SCS, Cbc, MSD=false) │ Float64 │ 2qutrits(2) │ NaN │ NaN │
│ 34 │ MISDP (dB^2) │ Pajarito(Mosek, Gurobi, MSD=false) │ Float64 │ 3qutrits(2) │ NaN │ NaN │
│ 35 │ MISDP (dB^2) │ Pajarito(Mosek, Gurobi, MSD=true) │ Float64 │ 3qutrits(2) │ NaN │ NaN │
│ 36 │ MISDP (dB^2) │ Pajarito(SCS, Cbc, MSD=false) │ Float64 │ 3qutrits(2) │ NaN │ NaN │
```
| GuessworkQuantumSideInfo | https://github.com/ericphanson/GuessworkQuantumSideInfo.jl.git |
|
[
"MIT"
] | 0.1.2 | 2f8f4dba5a8bf0aff3b7af5f2181d9aac0014529 | docs | 753 | # High precision tests
These tests use the high-precision SDP solver SDPA-GMP, via
[SDPAFamily.jl](https://github.com/ericphanson/SDPAFamily.jl). This test suite currently
uses Convex.jl version 0.13, which is incompatible with the
Pajarito solver used for mixed-integer SDPs (until that solver is updated to use
MathOptInterface).
To run the high-precision tests, call
```
julia --project=. -e 'include("tests.jl")'
```
from the command line, with the working directory set to this folder, or enter a
Julia session with
```
julia --project=.
```
and then include the tests file via `include("tests.jl")`.
The `Manifest.toml` file is included in this folder so that a set of compatible
versions of the necessary packages can be reproduced here, using Julia 1.4.
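To reproduce that environment before running the tests, one option (a minimal sketch using the standard Pkg workflow; adjust to your own setup) is to instantiate the manifest before including the test file:
```
julia --project=. -e 'using Pkg; Pkg.instantiate(); include("tests.jl")'
```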
| GuessworkQuantumSideInfo | https://github.com/ericphanson/GuessworkQuantumSideInfo.jl.git |
|
[
"MIT"
] | 0.1.2 | c385995decd042d8ccb797c42b9603ed1e459869 | code | 343 | using Documenter, GraphicalLasso
makedocs(
sitename="GraphicalLasso.jl",
format=Documenter.HTML(
prettyurls=get(ENV, "CI", nothing) == "true"
),
pages=[
"Home" => [
"index.md"
],
],
modules=[GraphicalLasso]
)
deploydocs(
repo="github.com/ivanuricardo/GraphicalLasso.jl.git",
)
| GraphicalLasso | https://github.com/ivanuricardo/GraphicalLasso.jl.git |
|
[
"MIT"
] | 0.1.2 | c385995decd042d8ccb797c42b9603ed1e459869 | code | 291 | module GraphicalLasso
using Statistics
using LinearAlgebra
using Random
using Distributions
include("./glasso.jl")
export softthresh, cdlasso, glasso
include("./utils.jl")
export offdiag, randsparsecov, iscov
include("./infocrit.jl")
export critfunc, countedges, ebic, tuningselect
end
| GraphicalLasso | https://github.com/ivanuricardo/GraphicalLasso.jl.git |
|
[
"MIT"
] | 0.1.2 | c385995decd042d8ccb797c42b9603ed1e459869 | code | 3363 |
softthresh(x, λ) = sign.(x) .* max.(abs.(x) .- λ, 0)
"""
    cdlasso(W11::AbstractMatrix{T}, s12::AbstractVector{T}, λ::Real; max_iter::Int=100, tol::T=1e-5) where {T<:Real}
Solves the coordinate descent Lasso problem.
# Arguments
- `W11::Matrix{T}`: A square matrix used in the coordinate descent update.
- `s12::Vector{T}`: A vector used in the coordinate descent update.
- `λ::Real`: Regularization parameter.
- `max_iter::Int=100`: Maximum number of iterations. (optional)
- `tol::T=1e-5`: Tolerance for the convergence criteria. (optional)
# Returns
- `Vector{T}`: Solution vector `β`.
"""
function cdlasso(
W11::AbstractMatrix{T},
s12::AbstractVector{T},
λ::Real;
max_iter::Int=100,
tol::T=1e-5) where {T<:Real}
p = length(s12)
β = zeros(p)
for _ in 1:max_iter
β_old = copy(β)
for j in 1:p
idx = setdiff(1:p, j)
r_j = s12[j] - W11[idx, j]' * β[idx]
β[j] = softthresh(r_j, λ) / W11[j, j]
end
if norm(β - β_old) < tol
break
end
end
return β
end
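# Usage sketch (comment only; `X`, `y`, and the penalty value are illustrative):
# for a design matrix `X` and response `y`, the lasso coefficients are obtained
# from the Gram matrix and moment vector,
#
#     βhat = cdlasso(X' * X, X' * y, 0.5)
#
# `glasso` below calls this routine in the same way on sub-blocks of its
# working covariance matrix.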
"""
    glasso(s::AbstractMatrix{Float64}, obs::Int, λ::Real; penalizediag::Bool=true, γ::Real=0.0, tol::Float64=1e-05, verbose::Bool=true, maxiter::Int=100, winit::AbstractMatrix{Float64}=zeros(size(s)))
Applies the graphical lasso (glasso) algorithm to estimate a sparse inverse covariance matrix.
# Arguments
- `s::Matrix{Float64}`: Empirical covariance matrix.
- `obs::Int`: Number of observations.
- `λ::Real`: Regularization parameter for the lasso penalty.
- `penalizediag::Bool=true`: Whether to penalize the diagonal entries. (optional)
- `γ::Real=0.0`: EBIC tuning parameter. (optional)
- `tol::Float64=1e-05`: Tolerance for the convergence criteria. (optional)
- `verbose::Bool=true`: If true, prints convergence information. (optional)
- `maxiter::Int=100`: Maximum number of iterations. (optional)
- `winit::Matrix{Float64}=zeros(size(s))`: Initial value of the precision matrix. (optional)
# Returns
- `NamedTuple`: A named tuple with fields:
- `W::Matrix{Float64}`: Estimated precision matrix.
- `θ::Matrix{Float64}`: Estimated inverse covariance matrix.
- `ll::Float64`: Log-likelihood of the estimate.
- `bicval::Float64`: EBIC value of the estimate.
"""
function glasso(
s::AbstractMatrix{Float64},
obs::Int,
λ::Real;
penalizediag::Bool=true,
γ::Real=0.0,
tol::Float64=1e-05,
verbose::Bool=true,
maxiter::Int=100,
winit::AbstractMatrix{Float64}=zeros(size(s)),
)
p = size(s, 1)
if winit == zeros(size(s))
W = Matrix(copy(s) + (penalizediag ? λ * I : zero(s)))
else
W = Matrix(copy(winit)) + (penalizediag ? λ * I : zero(s))
end
niter = 0
for _ in 1:maxiter
niter += 1
W_old = copy(W)
for j in 1:p
idx = setdiff(1:p, j)
W11 = W[idx, idx]
s12 = s[idx, j]
βhat = cdlasso(W11, s12, λ; max_iter=maxiter, tol=tol)
W[idx, j] = W11 * βhat
W[j, idx] = W[idx, j]'
end
if norm(W - W_old, 1) < tol
if verbose
@info "glasso converged with $niter iterations."
end
break
end
end
θ = inv(W)
ll = -(obs / 2) * critfunc(s, θ, W; penalizediag)
bicval = ebic(θ, ll, obs, 1e-06, γ)
return (; W, θ, ll, bicval)
end
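# Usage sketch (comment only; `s` is any empirical covariance matrix and `obs`
# the number of observations it was computed from):
#
#     fit = glasso(s, obs, 0.2; penalizediag = false)
#     fit.θ       # sparse precision estimate
#     fit.bicval  # EBIC value, useful when comparing different λ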
| GraphicalLasso | https://github.com/ivanuricardo/GraphicalLasso.jl.git |
|
[
"MIT"
] | 0.1.2 | c385995decd042d8ccb797c42b9603ed1e459869 | code | 2802 |
countedges(x, thr) = sum(abs.(x) .> thr)
"""
ebic(θ, ll, obs, thr, γ)
Calculates the Extended Bayesian Information Criterion (EBIC) for a given precision matrix `θ`.
From the paper by Foygel and Drton (2010), the EBIC is defined as:
``\\text{EBIC} = -2 \\times \\text{Log-likelihood} + \\log(n) \\times \\mathbf{E} + 4 \\times \\gamma \\times \\mathbf{E} \\times \\log(p)``
where:
- n is the number of observations.
- p is the number of variables.
- gamma is a tuning parameter.
- The number of edges is calculated as the count of entries in theta that exceed a given threshold.
# Arguments
- `θ::AbstractMatrix`: Precision matrix.
- `ll::Real`: Log-likelihood.
- `obs::Int`: Number of observations.
- `thr::Real`: Threshold value for counting edges.
- `γ::Real`: EBIC tuning parameter.
# Returns
- `Real`: EBIC value.
"""
function ebic(θ, ll, obs, thr, γ)
edgecount = countedges(θ, thr)
ebicpen = 4 * γ * edgecount * log(size(θ, 1))
return -2 * ll + (log(obs) * edgecount) + ebicpen
end
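# Usage sketch (comment only; all names illustrative): given a precision
# estimate `θhat`, its log-likelihood `ll`, and `obs` observations,
#
#     ebic(θhat, ll, obs, 1e-6, 0.5)
#
# Note that `countedges` counts every entry of `θhat` whose magnitude exceeds
# the threshold, including the diagonal.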
"""
critfunc(s, θ, rho; penalizediag=true)
Calculates the objective function value for the graphical lasso.
# Arguments
- `s::AbstractMatrix`: Empirical covariance matrix.
- `θ::AbstractMatrix`: Precision matrix.
- `rho::Real`: Regularization parameter.
- `penalizediag::Bool=true`: Whether to penalize the diagonal entries. (optional)
# Returns
- `Real`: Value of the objective function.
"""
function critfunc(s, θ, rho; penalizediag=true)
ψ = copy(θ)
if !penalizediag
ψ[diagind(ψ)] .= 0
end
crit = -logdet(θ) + tr(s * θ) + sum(abs.(rho * ψ))
return crit
end
"""
    tuningselect(s::AbstractMatrix{Float64}, obs::Int, λ::AbstractVector{T}; γ::Real=0.0, kwargs...) where {T}
Selects the optimal regularization parameter `λ` for the graphical lasso using EBIC.
# Arguments
- `s::AbstractMatrix{Float64}`: Empirical covariance matrix.
- `obs::Int`: Number of observations.
- `λ::AbstractVector{T}`: Vector of regularization parameters to be tested.
- `γ::Real=0.0`: EBIC tuning parameter. (optional)
- `kwargs...`: Additional keyword arguments forwarded to [`glasso`](@ref). (optional)
# Returns
- `T`: The optimal regularization parameter from the input vector `λ`.
"""
function tuningselect(
s::AbstractMatrix{Float64},
obs::Int,
λ::AbstractVector{T};
γ::Real=0.0,
kwargs...
) where {T}
sortedλ = sort(λ)
p, _ = size(s)
numλ = length(sortedλ)
covarray = zeros(Float64, p, p, numλ)
bicvec = Vector{Float64}(undef, numλ)
W, _, _, bicval = glasso(s, obs, sortedλ[1]; γ, kwargs...)
bicvec[1] = bicval
covarray[:, :, 1] = W
for i in 2:numλ
nextW = covarray[:, :, i-1]
glassoresult = glasso(s, obs, sortedλ[i]; γ, winit=nextW, kwargs...)
covarray[:, :, i] = glassoresult.W
bicvec[i] = glassoresult.bicval
end
lowestidx = argmin(bicvec)
return sortedλ[lowestidx]
end
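# Usage sketch (comment only): scan a grid of penalties and keep the one with
# the lowest EBIC. The grid is sorted internally, and each fit warm-starts the
# next one through `winit`.
#
#     bestλ = tuningselect(s, obs, 0.05:0.05:1.0; γ = 0.1)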
| GraphicalLasso | https://github.com/ivanuricardo/GraphicalLasso.jl.git |
|
[
"MIT"
] | 0.1.2 | c385995decd042d8ccb797c42b9603ed1e459869 | code | 1421 |
offdiag(x) = x[findall(!iszero, ones(size(x)) - I)]
"""
randsparsecov(p, thr)
Generates a random sparse covariance matrix of size `p x p` with a specified threshold for sparsity.
# Arguments
- `p::Int`: The dimension of the covariance matrix.
- `thr::Real`: Threshold value for sparsity. Values below this threshold will be set to zero.
# Returns
- `Hermitian{Float64}`: A sparse covariance matrix.
"""
function randsparsecov(p, thr)
s = randn(p, p)
denseΣ = (s + s') / 2
valsΣ = eigvals(denseΣ)
vecsΣ = eigvecs(denseΣ)
unthreshΣ = vecsΣ' * Diagonal(abs.(valsΣ)) * vecsΣ
Σ = softthresh.(unthreshΣ, thr) + I
return Hermitian(Σ)
end
"""
iscov(x::AbstractMatrix{T}) where {T<:Real}
Checks if a given matrix is a valid covariance matrix. A valid covariance matrix must be square, symmetric, and positive semi-definite.
# Arguments
- `x::AbstractMatrix{T}`: Input matrix to check.
# Returns
- `Bool`: `true` if the matrix is a valid covariance matrix, `false` otherwise.
"""
function iscov(x::AbstractMatrix{T}) where {T<:Real}
n, m = size(x)
if n != m
@info "result is not square."
return false
end
if !issymmetric(x)
@info "result is not symmetric."
return false
end
xxevals = eigvals(x)
if any(xxevals .< 0)
@info "result is not positive semi-definite."
return false
end
return true
end
| GraphicalLasso | https://github.com/ivanuricardo/GraphicalLasso.jl.git |
|
[
"MIT"
] | 0.1.2 | c385995decd042d8ccb797c42b9603ed1e459869 | code | 103 | using GraphicalLasso
using Test, Statistics, LinearAlgebra, Distributions
include("./test-glasso.jl")
| GraphicalLasso | https://github.com/ivanuricardo/GraphicalLasso.jl.git |
|
[
"MIT"
] | 0.1.2 | c385995decd042d8ccb797c42b9603ed1e459869 | code | 2125 |
@testset "softthresh" begin
@test softthresh(1.0, 0.5) ≈ 0.5
@test softthresh(1.0, 1.0) ≈ 0.0
v = [1.0, 2.0, -3.0]
sv = softthresh(v, 1.5)
@test sv[1] ≈ 0.0
@test sv[2] ≈ 0.5
@test sv[3] ≈ -1.5
end
@testset "off diagonal average" begin
a = [2 1 1; 1 2 1; 1 1 2]
@test mean(offdiag(a)) ≈ 1.0
end
@testset "counting edges" begin
a = [1 2 3; 4 5 6; 7 8 9]
a[diagind(a)] .= 0
@test countedges(a, 1e-10) ≈ 6
end
@testset "Glasso returns covariance result" begin
using Random
Random.seed!(1234)
nobs = 200
df = randn(nobs, 5)
s = cov(df)
gs = glasso(s, nobs, 0.1)
@test iscov(gs.W)
end
@testset "Random sparse matrix returns covariance" begin
using Random
Random.seed!(1234)
s1 = randsparsecov(10, 0.5)
s2 = randsparsecov(20, 0.2)
s3 = randsparsecov(50, 0.5)
s4 = randsparsecov(100, 0.5)
@test iscov(s1)
@test iscov(s2)
@test iscov(s3)
@test iscov(s4)
end
@testset "large tuning parameter" begin
using Random
Random.seed!(1234)
nobs = 200
df = randn(nobs, 10)
s = cov(df)
λ = 8e10
gs = glasso(s, nobs, λ)
od = offdiag(gs.θ)
@test od ≈ zeros(90)
end
@testset "CD Lasso yields least squares solution" begin
using Random
Random.seed!(1234)
nobs = 500
X = randn(nobs, 10)
trueβ = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]
y = X * trueβ + 0.1 * randn(nobs)
unmeanX = X .- mean(X, dims=1)
stdX = unmeanX ./ std(X, dims=1)
lsβ = inv(stdX' * stdX) * stdX' * y
λ = 0.0
cdlassoβ = cdlasso(stdX' * stdX, stdX' * y, λ; tol=1e-5)
@test cdlassoβ ≈ lsβ
end
@testset "CD Lasso yields correct number of zeros" begin
using Random
Random.seed!(1234)
nobs = 20
X = randn(nobs, 10)
trueβ = [0.0, 2.0, 0.0, 4.0, 0.0, 6.0, 0.0, 8.0, 0.0, 10.0]
y = X * trueβ + 0.1 * randn(nobs)
unmeanX = X .- mean(X, dims=1)
stdX = unmeanX ./ std(X, dims=1)
λ = 1.3
cdlassoβ = cdlasso(stdX' * stdX, stdX' * y, λ; tol=1e-5)
numedges = countedges(cdlassoβ, 1e-10)
@test numedges ≈ 5
end
| GraphicalLasso | https://github.com/ivanuricardo/GraphicalLasso.jl.git |
|
[
"MIT"
] | 0.1.2 | c385995decd042d8ccb797c42b9603ed1e459869 | docs | 4775 | # GraphicalLasso.jl
[](https://github.com/ivanuricardo/GraphicalLasso.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://ivanuricardo.github.io/GraphicalLasso.jl/stable)
[](https://codecov.io/gh/ivanuricardo/GraphicalLasso.jl)
This package provides efficient tools for generating sparse covariance matrices, estimating sparse precision matrices using the graphical lasso (glasso) algorithm, and selecting optimal regularization parameters.
## Key Features:
- **Sparse Covariance Matrix Generation**: Generate random sparse covariance matrices with customizable sparsity thresholds.
- **Graphical Lasso (glasso) Implementation**: Apply the glasso algorithm to estimate sparse precision matrices from empirical covariance matrices.
- **Extended Bayesian Information Criterion (EBIC)**: Calculate EBIC for model selection, supporting edge counting with thresholding.
- **Tuning Parameter Selection**: Automatically select optimal regularization parameters for the glasso algorithm using EBIC.
- **Covariance Matrix Validation**: Functions to check if a matrix is a valid covariance matrix (square, symmetric, positive semi-definite).
## Installation
To install this package, first enter into Pkg mode by pressing `]` in the Julia REPL, then run the following command:
```julia
pkg> add GraphicalLasso
```
There is also the option to install the development version of this package directly from the GitHub repository:
```julia
pkg> add https://github.com/ivanuricardo/GraphicalLasso.jl
```
## Functions Included:
- `softthresh(x, λ)`: Applies soft thresholding to an array.
- `cdlasso(W11, s12, λ; max_iter=100, tol=1e-5)`: Solves the coordinate descent Lasso problem.
- `glasso(s, obs, λ; penalizediag=true, γ=0.0, tol=1e-05, verbose=true, maxiter=100, winit=zeros(size(s)))`: Estimates a sparse inverse covariance matrix using the glasso algorithm.
- `countedges(x, thr)`: Counts the number of edges in an array exceeding a threshold.
- `ebic(θ, ll, obs, thr, γ)`: Calculates the EBIC for a given precision matrix.
- `critfunc(s, θ, rho; penalizediag=true)`: Computes the objective function for the graphical lasso.
- `tuningselect(s, obs, λ; γ=0.0)`: Selects the optimal regularization parameter using EBIC.
- `randsparsecov(p, thr)`: Generates a random sparse covariance matrix.
- `iscov(x)`: Checks if a matrix is a valid covariance matrix.
## Example
Here is an example of how to use this package to generate a sparse covariance matrix, apply the graphical lasso algorithm, and select the optimal tuning parameter:
```julia
using LinearAlgebra, GraphicalLasso, Random, Distributions
Random.seed!(123456)
# Generate true sparse covariance matrix
p = 20
thr = 0.5
Σ = randsparsecov(p, thr)
# Check if this is a valid covariance matrix
iscov(Σ)
# Generate data from the true covariance matrix, create sample covariance matrix
obs = 100
μ = zeros(p)
unstddf = rand(MvNormal(zeros(p), Σ), obs)
df = (unstddf .- mean(unstddf, dims=2)) ./ std(unstddf, dims=2)
s = df * df' / obs
# Select the optimal tuning parameter from a range
λvalues = 0.0:0.01:2.0
optimalλ = tuningselect(s, obs, λvalues, verbose=false)
println("Optimal λ: ", optimalλ)
# Apply the graphical lasso algorithm
result = glasso(s, obs, optimalλ)
# Extract results
W = result.W
θ = result.θ
ll = result.ll
bicval = result.bicval
println("Estimated Precision Matrix: ", θ)
println("Log-Likelihood: ", ll)
println("EBIC Value: ", bicval)
# Validate if the result is a valid covariance matrix
is_valid_cov = iscov(W)
println("Is the estimated matrix a valid covariance matrix? ", is_valid_cov)
```
Moreover, although not the main focus of this package, we also provide a method to compute the lasso solution via coordinate descent.
We demonstrate this method below with a generated data set and a sparse response vector.
```julia
using LinearAlgebra, GraphicalLasso, Random, Plots
Random.seed!(123456)
N = 100
k = 100
kzeros = 90
X = randn(N, k)
beta = ones(k)
beta[1:kzeros] .= 0
betahat = zeros(k)
y = X * beta + randn(N)
λ = 10.0
cdlassobeta = cdlasso(X'X, X'y, λ)
# We expect the last column to be dense.
heatmap(reshape(cdlassobeta, 10, 10), yflip = true)
```
## Contribution
We welcome contributions to improve the package.
If you encounter any issues or have suggestions for new features, feel free to open an issue or submit a pull request.
## License
This project is licensed under the MIT License - see the [LICENSE](https://github.com/ivanuricardo/GraphicalLasso.jl/blob/main/LICENSE) file for details.
| GraphicalLasso | https://github.com/ivanuricardo/GraphicalLasso.jl.git |
|
[
"MIT"
] | 0.1.2 | c385995decd042d8ccb797c42b9603ed1e459869 | docs | 548 | # GraphicalLasso.jl
A package for fitting the graphical lasso, with diagnostics for tuning-parameter selection.
This package follows the work of Friedman et al. (2008) and the extended BIC criterion of Foygel and Drton (2010).
We draw inspiration from the `glasso` package in R and aim to provide a similar user experience in Julia.
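## Quick Start
A minimal sketch of a typical call sequence (toy data; see the package README for a fuller walkthrough):
```julia
using GraphicalLasso, Random

Random.seed!(1)
obs = 200
df = randn(obs, 10)                     # toy data
s = df' * df / obs                      # sample covariance

λ = tuningselect(s, obs, 0.05:0.05:1.0) # EBIC-based choice of the penalty
fit = glasso(s, obs, λ)
fit.θ                                   # estimated sparse precision matrix
```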
## Graphical Lasso Main Functions
```@docs
glasso
cdlasso
```
## Information Criteria
```@docs
ebic
critfunc
tuningselect
```
## Utility Functions
```@docs
randsparsecov
iscov
```
## Index
```@index
```
| GraphicalLasso | https://github.com/ivanuricardo/GraphicalLasso.jl.git |
|
[
"MIT"
] | 0.1.1 | cd42819cf623bf7b4a3a251c85f27ed74bd1e1d8 | code | 7162 | module JitterTime
using LinearAlgebra
include("types.jl") # Containing all datastructures necessary for JitterTime
include("util.jl") # Containing auxiliary functions
include("execution.jl") # Containing all code for Executing the JitterTimeSystem
include("printing.jl") # Containing all Pretty Printing code
export calcDynamics!,
reset!
"""
calcDynamics!(N::JTSystem)
Calculate the total system dynamics for the JitterTime system `N`.
Assuming the state of the total system (including all subsystems) to be x.
The continuous time evolution follows the equation
```
dx(t) = N.Ac x(t)dt + dvc(t)
```
where `vc` is continuous white noise with intensity `N.Rc`.
The discrete-event evolution (when executing system `S`) follows the equation
```
x+(t_k) = S.Ad x(t_k) + vd(t_k)
```
where vd is white Gaussian noise with variance `S.Rd`.
"""
function calcDynamics!(N::JTSystem{T}) where {T}
# Step 1: Count states and inputs and build a system-to-state index mapping
indices = Vector{UnitRange{Int64}}(undef, length(N.systems))
totstates::Int64 = 0
n::Int64 = 0
empty!(N.idtoindex)
merge!(N.idtoindex,
Dict(N.systems[x].id => x for x in eachindex(N.systems)))
length(N.idtoindex) == length(N.systems) || error("Duplicate System ID's")
for i in eachindex(N.systems)
# TODO: Add "stateindex"? Used for stateDisturbance
n = N.systems[i].n
indices[i] = UnitRange(totstates+1, totstates+n)
totstates += n
end # for
# Step 2: Check that the number of outputs and inputs match for all system connections (including reset dynamics)
# TODO Add reset dynamics
# TODO Add continuous systems
# FIXME Bug in original Matlab code? Can't use an arbitrary ID
if !_connections_correct(N)
error("Wrong number of inputs")
end # if
    # Initialize Ac, Rc, Qc
Ac = zero(N.P)
Rc = zero(N.P)
Qc = zero(N.P)
# Step 3: Formulate the total continuous-time dynamics, continuous noise, discrete-time dynamics (all versions), discrete noise and continuous cost
# TODO Add reset dynamics?
for (k, s) in enumerate(N.systems)
xtou = zeros(T, s.n, totstates)
xtou[:, indices[k]] = Matrix{T}(I, s.n, s.n) # = I
_formulate_dynamics!(s, N, Ac, Rc, Qc, xtou, indices, totstates, k)
end # for
# Step 4: Create reduced system to improve performance
N.reduced = ReducedSystem(Ac, Rc, Qc)
return nothing
end
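# Usage sketch (comment only; the numbers are illustrative and mirror the
# package tests): build the subsystems, wrap them in a JitterTimeSystem, and
# call calcDynamics! once before any passTime!/execSys! calls.
#
#     plant = ContinuousSystem(1, [-1 1; 0 -1], [0; 1], [1. 0], 0, 2)
#     ctrl = ContinuousSystem(2, 0., 1, -1, 0, 1)
#     N = JitterTimeSystem([plant, ctrl])
#     calcDynamics!(N)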
"""
reset!(N::JTSystem)
Reset the dynamics of the JitterTime system.
"""
function reset!(N::JTSystem)
# Revert passTime!, execSys!
N.J = 0.0
N.dJdt = 0.0
fill!(N.m, 0.0)
fill!(N.P, 0.0)
N.Tsim = 0.0
return nothing
end
function reset!(N::PeriodicJitterTimeSystem{T}) where {T}
N.periodicAnalysis = false
N.Atot = Matrix{T}(I, size(N.P)...)
fill!(N.Rtot, 0.0)
fill!(N.dtot, 0.0)
fill!(N.Pper, 0.0)
fill!(N.mper, 0.0)
reset!(N.jtsys)
end
###########################
### Auxiliary functions ###
###########################
#= Check that the number of outputs and inputs match for all system connections =#
function _connections_correct(N::JTSystem)
for s in N.systems
if !_connections_correct(s, N)
return false
end # if
end # for
return true
end # function
function _connections_correct(s::VersionedSystem, N::JTSystem)
for v in s.versions
if sum(N.systems[N.idtoindex[x]].p for x in v.inputid) != v.r
return false
end # if
end # for
return true
end # function
function _connections_correct(s::LinearSystem, N::JTSystem)
return sum(N.systems[N.idtoindex[x]].p for x in s.inputid) == s.r
end # function
#= Formulate dynamics depending on LinearSystem type =#
function _formulate_dynamics!(s::ContinuousSystem,
N::JTSystem{T},
Ac::Matrix{T},
Rc::Matrix{T},
Qc::Matrix{T},
xtou::Matrix,
indices::Vector{UnitRange{S}},
totstates::S,
k::S) where {T, S <: Integer}
# Construct Ac matrix
Ac[indices[k], indices[k]] = s.A
bix::Int64 = 1
for inputindex in map(x -> N.idtoindex[x], s.inputid)
input = N.systems[inputindex]
# Construct Ac matrix
Ac[indices[k], indices[inputindex]] = s.B[:, bix:bix+input.p-1]*input.C
bix += input.p
xtoy = zeros(T, input.p, totstates)
xtoy[:, indices[inputindex]] = input.C
xtou = vcat(xtou, xtoy)
end # for
# Construct Rc matrix
Rc[indices[k], indices[k]] = s.Rc
# Construct Qc matrix
Qc .+= xtou'*s.Qc*xtou # Combined cost of state, outputs and inputs
end # function
function _formulate_dynamics!(s::DiscreteSystem,
N::JTSystem{T},
Ac::Matrix{T},
Rc::Matrix{T},
Qc::Matrix{T},
xtou::Matrix,
indices::Vector{UnitRange{S}},
totstates::S,
k::S) where {T, S <: Integer}
sidx::Int64 = N.idtoindex[s.id]
push!(N.Ad[sidx], Matrix{T}(I, totstates, totstates))
N.Ad[sidx][1][indices[k], indices[k]] = s.A
push!(N.Rd[sidx], zeros(T, totstates, totstates))
N.Rd[sidx][1][indices[k], indices[k]] = s.R
bix::Int64 = 1
for inputindex in map(x -> N.idtoindex[x], s.inputid)
input = N.systems[inputindex]
N.Ad[sidx][1][indices[k], indices[inputindex]] = s.B[:, bix:bix+input.p-1]*input.C
bix += input.p
end # for
# Construct Qc matrix
Qc .+= xtou'*s.Qc*xtou # Combined cost of state, outputs and inputs
end # function
function _formulate_dynamics!(s::VersionedSystem,
N::JTSystem{T},
Ac::Matrix{T},
Rc::Matrix{T},
Qc::Matrix{T},
xtou::Matrix,
indices::Vector{UnitRange{S}},
totstates::S,
k::S) where {T, S <: Integer}
for (v, sver) in enumerate(s.versions)
sidx::Int64 = N.idtoindex[s.id]
push!(N.Ad[sidx], Matrix{T}(I, totstates, totstates)) # = I
N.Ad[sidx][v][indices[k], indices[k]] = sver.A
push!(N.Rd[sidx], zeros(T, totstates, totstates))
N.Rd[sidx][v][indices[k], indices[k]] = sver.R
bix::Int64 = 1
for inputindex in map(x -> N.idtoindex[x], sver.inputid)
input = N.systems[inputindex]
N.Ad[sidx][v][indices[k], indices[inputindex]] = sver.B[:, bix:bix+input.p-1]*input.C
bix += input.p
end # for
end # for
# Construct Qc matrix
Qc .+= xtou'*s.Qc*xtou # Combined cost of state, outputs and inputs
end # function
end # module
| JitterTime | https://github.com/X-N-C/JitterTime.jl.git |
|
[
"MIT"
] | 0.1.1 | cd42819cf623bf7b4a3a251c85f27ed74bd1e1d8 | code | 4553 | export execSys!,
passTime!,
passTimeUntil!,
beginPeriodicAnalysis!,
endPeriodicAnalysis!
"""
execSys!(N::JTSystem, sysid::Integer [, ver::Integer])
Execute the discrete-time system with the id 'sysid'. If the executed system is a `VersionedSystem` a version can be specifed.
## Arguments:
* `N`: The JitterTime system.
* `sysid`: ID of the discrete-time system to be updated.
* `ver`: (OPTIONAL) What version to execute (default = 1)
"""
function execSys!(N::JTSystem, sysid::Integer, ver::Integer = 1)
sidx = N.idtoindex[sysid]
N.systems[sidx] isa Union{DiscreteSystem, VersionedSystem} || error("Can only execute (versioned) discrete-time systems.")
Ad = N.Ad[sidx][ver]
Rd = N.Rd[sidx][ver]
N.m .= Ad * N.m
# N.P = Ad * N.P * Ad' .+ Rd
_mat_mul_add!(Ad, N.P, Ad', Rd, N.bufl)
if N isa PeriodicJitterTimeSystem && N.periodicAnalysis
# N.Atot = Ad * N.Atot
mul!(N.bufl, Ad, N.Atot)
N.Atot .= N.bufl
# N.Rtot = Ad * N.Rtot * Ad' + Rd
_mat_mul_add!(Ad, N.Rtot, Ad', Rd, N.bufl)
end
return nothing
end
"""
passTime!(N::JTSystem, T::Real)
Let time pass by `T` units, running all continuous-time systems and accumulating cost.
## Arguments:
* `N`: The JitterTime system.
* `T`: The time interval to pass
"""
function passTime!(N::JTSystem, time::Real)
time >= 0 || error("Time must be positive!")
time == 0 && return nothing # Special case when no time passed
(Ad, Rd, Qd, Qconst) = calcC2D(N.reduced, time)
#costm = N.m' * Qd * N.m
costm = dot(N.m, Qd, N.m)
#costP = tr(Qd * N.P) .+ Qconst
mul!(N.bufl, Qd, N.P)
costP = tr(N.bufl) .+ Qconst
N.J += costm + costP
N.dJdt = (costm + costP) / time
N.m .= Ad * N.m
# N.P = Ad * N.P * Ad' .+ Rd
_mat_mul_add!(Ad, N.P, Ad', Rd, N.bufl)
N.Tsim += time
if N isa PeriodicJitterTimeSystem && N.periodicAnalysis
# N.Atot = Ad * N.Atot
mul!(N.bufl, Ad, N.Atot)
N.Atot .= N.bufl
# N.Rtot = Ad * N.Rtot * Ad' + Rd
_mat_mul_add!(Ad, N.Rtot, Ad', Rd, N.bufl)
N.dtot .= Ad * N.dtot
end
return nothing
end
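# Usage sketch (comment only; `CTRL_ID`, `h` and `τ` are illustrative): a
# periodic sample-compute-actuate loop alternates passTime! and execSys!,
#
#     passTime!(N, τ); execSys!(N, CTRL_ID)
#     passTime!(N, h - τ)
#
# after which N.J holds the accumulated cost and N.dJdt the latest cost rate.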
"""
passTime!(N::JTSystem)
Let time pass, running all continuous-time systems and accumulating cost, in a system with a fixed time interval length.
## Arguments:
* `N`: The JitterTime system.
"""
function passTime!(N::JTSystem)
try N.h catch; error("JTSystem does not contain a FixedIntervalJitterTimeSystem!") end
#costm = N.m' * Qd * N.m
costm = dot(N.m, N.FIQd, N.m)
#costP = tr(Qd * N.P) .+ Qconst
mul!(N.bufl, N.FIQd, N.P)
costP = tr(N.bufl) .+ N.FIQconst
N.J += costm + costP
N.dJdt = (costm + costP) / N.h
N.m .= N.FIAd * N.m
# N.P = Ad * N.P * Ad' .+ Rd
_mat_mul_add!(N.FIAd, N.P, N.FIAd', N.FIRd, N.bufl)
N.Tsim += N.h
if N isa PeriodicJitterTimeSystem && N.periodicAnalysis
# N.Atot = Ad * N.Atot
mul!(N.bufl, N.FIAd, N.Atot)
N.Atot .= N.bufl
# N.Rtot = Ad * N.Rtot * Ad' + Rd
_mat_mul_add!(N.FIAd, N.Rtot, N.FIAd', N.FIRd, N.bufl)
N.dtot .= N.FIAd * N.dtot
end
return nothing
end
"""
passTimeUntil!(N::JTSystem, T::Real)
Let time pass until `N.Tsim = T`, running all continuous-time systems and accumulating cost.
## Arguments:
* `N`: The JitterTime system.
* `T`: The target time. Must be greater than or equal to N.Tsim.
"""
passTimeUntil!(N::JTSystem, endTime::Real) = passTime!(N, endTime - N.Tsim)
"""
beginPeriodicAnalysis!(N::PeriodicJitterTimeSystem)
Start a periodic covariance analysis for the PeriodicJitterTimeSystem `N`. Must be called after [`calcDynamics!`](@ref).
## Arguments:
* `N`: The JitterTime system.
"""
function beginPeriodicAnalysis!(N::PeriodicJitterTimeSystem)
N.periodicAnalysis = true
return nothing
end # function
"""
endPeriodicAnalysis!(N::PeriodicJitterTimeSystem)
End a periodic covariance analysis for the PeriodicJitterTimeSystem `N`. Must be called after [`beginPeriodicAnalysis!`](@ref)
"""
function endPeriodicAnalysis!(N::PeriodicJitterTimeSystem)
N.periodicAnalysis || error("Periodic analysis not started")
    maximum(abs.(eigvalsCheck(N.Atot))) < 1 || error("Periodic analysis failed due to unstable system mode(s)")
N.Pper = dlyap(N.Atot, N.Rtot)
N.mper = (I - N.Atot) \ N.dtot
N.periodicAnalysis = false
return nothing
end # function
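# Usage sketch (comment only; `systems` is an illustrative vector of
# LinearSystems) for a periodic steady-state analysis:
#
#     P = PeriodicJitterTimeSystem(JitterTimeSystem(systems))
#     calcDynamics!(P)
#     beginPeriodicAnalysis!(P)
#     # ... one full period of passTime!/execSys! calls ...
#     endPeriodicAnalysis!(P)  # P.Pper and P.mper hold the periodic covariance and mean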
| JitterTime | https://github.com/X-N-C/JitterTime.jl.git |
|
[
"MIT"
] | 0.1.1 | cd42819cf623bf7b4a3a251c85f27ed74bd1e1d8 | code | 2105 | # Structs
function Base.show(io::IO, sys::DiscreteSystem{T}) where {T}
n = sys.n - sys.p # Number of states in phi
println(io, "$(n == 0 ? "Static " : "")DiscreteSystem{$T} (ID = $(sys.id))\n{")
if n != 0
println(io, "\tA: $(sys.A[1:n, 1:n])") # TODO Which matrices should be printed?
println(io, "\tB: $(sys.B[1:n, :])")
println(io, "\tC: $(sys.A[n+1:end, 1:n])")
println(io, "\tD: $(sys.B[n+1:end, :])")
else
println(io, "\tD: $(sys.B[n+1:end, :])")
end
println(io, "\tR: $(sys.R)")
println(io, "\tQc: $(sys.Qc)")
print(io, "} Input from LinearSystem$((length(sys.inputid) == 1) ? ": $(sys.inputid[1])" : "s $(sys.inputid)")")
end # function
function Base.show(io::IO, sys::ContinuousSystem{T}) where {T}
println(io, "ContinuousSystem{$T} (ID = $(sys.id))\n{")
println(io, "\tA: $(sys.A)")
println(io, "\tB: $(sys.B)")
println(io, "\tC: $(sys.C)")
println(io, "\tRc: $(sys.Rc)")
println(io, "\tQc: $(sys.Qc)")
print(io, "} Input from LinearSystem$((length(sys.inputid) == 1) ? ": $(sys.inputid[1])" : "s: $(sys.inputid)")")
end # function
function Base.show(io::IO, sys::VersionedSystem{T}) where {T}
println(io, "VersionedSystem{$T} (ID = $(sys.id)) containing $(length(sys.versions)) DiscreteSystems")
end # function
function Base.show(io::IO, sys::JitterTimeSystem{T}) where {T}
print(io, "JitterTimeSystem{$T} containing LinearSystem$((length(sys.systems) == 1) ? ": $(sys.systems[1].id)" : "s: $(map(x -> x.id, sys.systems))")")
end # function
function Base.show(io::IO, sys::PeriodicJitterTimeSystem{T}) where {T}
print(io, "PeriodicJitterTimeSystem{$T} containing LinearSystem$((length(sys.systems) == 1) ? ": $(sys.systems[1].id)" : "s: $(map(x -> x.id, sys.systems))")")
end # function
function Base.show(io::IO, sys::FixedIntervalJitterTimeSystem{T}) where {T}
print(io, "FixedIntervalJitterTimeSystem{$T} (interval = $(sys.h)) containing LinearSystem$((length(sys.systems) == 1) ? ": $(sys.systems[1].id)" : "s: $(map(x -> x.id, sys.systems))")")
end # function
| JitterTime | https://github.com/X-N-C/JitterTime.jl.git |
|
[
"MIT"
] | 0.1.1 | cd42819cf623bf7b4a3a251c85f27ed74bd1e1d8 | code | 17724 | export LinearSystem,
DiscreteSystem,
ContinuousSystem,
VersionedSystem,
JTSystem,
JitterTimeSystem,
PeriodicJitterTimeSystem,
FixedIntervalJitterTimeSystem
# Abstract types
"""
LinearSystem{T}
Abstract supertype for all linear systems with elements of type `T`.
"""
abstract type LinearSystem{T} end
"""
JTSystem{T}
Abstract supertype for all JitterTime systems with elements of type `T`.
"""
abstract type JTSystem{T} end
# Extracts the parametric type of an abstract type
_parametric_type(::LinearSystem{T}) where {T} = T
_parametric_type(::JTSystem{T}) where {T} = T
# Converts input to matrix form (and promotes it to type T)
_to_matrix(T, A::AbstractVector) = Matrix{T}(reshape(A, length(A), 1))
_to_matrix(T, A::AbstractMatrix) = (Ahat = similar(A, T); Ahat .= A) # Fallback
_to_matrix(T, A::Number) = fill(T(A), 1, 1)
_to_matrix(T, A::Adjoint{AT, AM}) where {AT <: Number, AM <: AbstractMatrix} = _to_matrix(T, AM(A))
# Initial validation that the LinearSystem is feasible
function _init_validation(A, B, C, D, id, inputid)
nx = size(A, 1)
nu = size(B, 2)
ny = size(C, 1)
if size(A, 2) != nx && nx != 0 # FIXME size([],1)==0; size([],2)==1
error("A must be square")
elseif size(B, 1) != nx
error("The number of rows of A ($(size(A,1))) and B ($(size(B,1))) are not equal")
elseif size(C, 2) != nx # FIXME add nx != 0 check? This is wrong
error("The number of columns of A ($(size(A,2))) and C ($(size(C,2))) are not equal")
elseif nu != size(D, 2)
error("The number of columns of B ($(size(B,2))) and D ($(size(D,2))) are not equal")
elseif ny != size(D, 1)
error("The number of rows of C ($(size(C,1))) and D ($(size(D,1))) are not equal")
elseif id <= 0
error("ID must be positive")
elseif id in inputid
error("System ($id) can't be connected to itself")
end
return nx, nu, ny
end # function
###############
### Structs ###
###############
"""
ContinuousSystem(sysid, A, B, C, D, inputid [, Rc, Qc])
Create a continuous-time linear system
```
u(t) --> sys --> y(t)
x(t)
```
The state initially has mean value `E(x) = 0` and covariance `V(x) = 0`.
## Arguments:
* `sysid`: A unique positive `Integer` identifier for this system (pick any). Used when referred to from other systems.
* `[A, B, C, D]`: A strictly proper, delay-free continuous-time LTI system in state-space form.
* `inputid`: A vector of input system identifiers.
The outputs of the corresponding systems will be used as inputs to this system.
The number of inputs in this system must equal the total number of outputs in the input systems.
An empty vector (or zero) indicates that the system inputs are unconnected.
* `Rc`: (OPTIONAL) The continuous state or input noise intensity matrix.
The noise vector is assumed to have the same size as x (for state-space systems).
Default assumed zero.
* `Qc`: (OPTIONAL) The continuous cost function is `E(Integral [x(t); u(t)]' * Qc * [x(t); u(t)] dt)` (for state-space systems).
Default assumed zero.
"""
struct ContinuousSystem{T} <: LinearSystem{T}
A::Matrix{T}
B::Matrix{T}
C::Matrix{T}
Rc::Matrix{T} # Noise intensity matrix (continuous)
Qc::Matrix{T} # Weight matrix (continuous)
inputid::Vector{Integer}
n::Integer # Number of states
r::Integer # Number of inputs
p::Integer # Number of outputs
id::Integer
resetDynamics::Bool
end
# Constructors
function ContinuousSystem(id::S, A, B, C, D, inputid::Vector{S},
Rc = zeros(size(A, 2), size(A, 2)),
Qc = zeros(size(A, 2)+size(B, 2), size(A, 2)+size(B, 2))) where {S <: Integer}
n, r, p = _init_validation(A, B, C, D, id, inputid)
(size(Rc,1),size(Rc,2)) == (n,n) || error("Rc ($(size(Rc,1))*$(size(Rc,2))) should be a n*n matrix; n = #states (= $n)")
(size(Qc,1),size(Qc,2)) == (n+r,n+r) || error("Qc ($(size(Qc,1))*$(size(Qc,2))) should be an (n+r)*(n+r) matrix; n = #states (= $n), r = #inputs (= $r)")
T = promote_type(eltype(A), eltype(B), eltype(C), eltype(D), eltype(Rc), eltype(Qc))
return ContinuousSystem{T}(_to_matrix(T, A), _to_matrix(T, B), _to_matrix(T, C), _to_matrix(T, Rc), _to_matrix(T, Qc), inputid, n, r, p, id, false)
end
ContinuousSystem(id::S, A, B, C, D, inputid::S,
Rc = zeros(size(A, 2), size(A, 2)),
Qc = zeros(size(A, 2)+size(B, 2), size(A, 2)+size(B, 2))) where {S <: Integer} =
ContinuousSystem(id, A, B, C, D, S[inputid], Rc, Qc)
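# Construction sketch (comment only; numbers illustrative): a double integrator
# driven by the output of the system with ID 2, with unit noise intensity on
# the second state and the default (zero) cost:
#
#     ContinuousSystem(1, [0 1; 0 0], [0; 1], [1 0], 0, 2, [0 0; 0 1])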
"""
DiscreteSystem(sysid, Phi, Gam, C, D, inputid [, Rd, Qc])
Create a discrete-time linear system
```
u(k) --> sys --> y(k)
x(k)
```
The state/output initially has mean value `E([x; y]) = 0` and covariance `V([x; y]) = 0`.
## Arguments:
* `sysid`: A unique positive `Integer` identifier for this system (pick any). Used when referred to from other systems.
* `[Phi, Gam, C, D]`: A discrete-time LTI system in state-space form (if `Phi`, `Gam`, and `C` are missing, interpreted as a static gain).
* `inputid`: A vector of input system identifiers.
The outputs of the corresponding systems will be used as inputs to this system.
The number of inputs in this system must equal the total number of outputs in the input systems.
An empty vector (or zero) indicates that the system inputs are unconnected.
* `Rd`: (OPTIONAL) The discrete state/output/input noise covariance matrix.
The noise vector is assumed to have the same size as `[x; y]` (for state-space systems).
Noise is added each time the system is executed.
Default assumed zero.
* `Qc`: (OPTIONAL) The continuous cost function is `E(Integral [x(t); u(t)]' * Qc * [x(t); u(t)] dt)` (for state-space systems).
_Note_ that both `x(t)` and `y(t)` are held constant between system executions.
Default assumed zero.
"""
struct DiscreteSystem{T} <: LinearSystem{T}
A::Matrix{T}
B::Matrix{T}
C::Matrix{T}
R::Matrix{T} # Noise
Qc::Matrix{T} # Cost
inputid::Vector{Integer}
n::Integer # Number of states
r::Integer # Number of inputs
p::Integer # Number of outputs
id::Integer
end
# Constructor
function DiscreteSystem(id::S, Phi, Gam, C, D, inputid::Vector{S},
R = zeros(size(Phi, 2)+size(C, 1), size(Phi, 2)+size(C, 1)),
Qc = zeros(size(Phi, 2)+size(C, 1), size(Phi, 2)+size(C, 1))) where {S <: Integer}
n, r, p = _init_validation(Phi, Gam, C, D, id, inputid)
(size(R,1),size(R,2)) == (n+p,n+p) || error("R ($(size(R,1))*$(size(R,2))) should be a (n+p)*(n+p) matrix; n = #states (= $n), p = #outputs (= $p).")
(size(Qc,1),size(Qc,2)) == (n+p,n+p) || error("Qc ($(size(Qc,1))*$(size(Qc,2))) should be a (n+p)*(n+p) matrix; n = #states (= $n), p = #outputs (= $p).")
Aarray = [Phi zeros(n, p); C zeros(p, p)]
Barray = [Gam; D]
T = promote_type(eltype(Phi), eltype(Gam), eltype(C), eltype(D), eltype(R), eltype(Qc))
return DiscreteSystem{T}(_to_matrix(T, Aarray), _to_matrix(T, Barray), _to_matrix(T, [zeros(p, n) I]), _to_matrix(T, R), _to_matrix(T, Qc), inputid, n+p, r, p, id)
end
DiscreteSystem(id::S, Phi, Gam, C, D, inputid::S,
R = zeros(size(Phi, 2)+size(C, 1), size(Phi, 2)+size(C, 1)),
Qc = zeros(size(Phi, 2)+size(C, 1), size(Phi, 2)+size(C, 1))) where {S <: Integer} =
DiscreteSystem(id, Phi, Gam, C, D, S[inputid], R, Qc)
function DiscreteSystem(id::S, D, inputid::Vector{S},
R = zeros(size(D, 1), size(D, 1)),
Qc = zeros(size(D, 1), size(D, 1))) where {S <: Integer}
n, r, p = 0, size(D, 2), size(D, 1)
(size(R,1),size(R,2)) == (p,p) || error("R ($(size(R,1))*$(size(R,2))) should be a (n+p)*(n+p) matrix; n = #states (= $n), p = #outputs (= $p).")
(size(Qc,1),size(Qc,2)) == (p,p) || error("Qc ($(size(Qc,1))*$(size(Qc,2))) should be a (n+p)*(n+p) matrix; n = #states (= $n), p = #outputs (= $p).")
Aarray = zeros(p, p)
Barray = D
T = promote_type(eltype(D), eltype(R), eltype(Qc))
return DiscreteSystem{T}(_to_matrix(T, Aarray), _to_matrix(T, Barray), Matrix{T}(I, p, p), _to_matrix(T, R), _to_matrix(T, Qc), inputid, n+p, r, p, id)
end
DiscreteSystem(id::S, D, inputid::S,
R = zeros(size(D, 1), size(D, 1)),
Qc = zeros(size(D, 1), size(D, 1))) where {S <: Integer} =
DiscreteSystem(id, D, S[inputid], R, Qc)
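# Construction sketch (comment only; numbers illustrative): a static gain of
# -0.5 acting on the output of system 1, executed via execSys!:
#
#     DiscreteSystem(2, -0.5, 1)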
"""
VersionedSystem(versions::Vector{DiscreteSystem})
Create a versioned discrete-time linear system; the version identifier is given by its index in the input vector.
"""
struct VersionedSystem{T} <: LinearSystem{T}
versions::Vector{DiscreteSystem}
# Constructor
function VersionedSystem(vers::Vector{DiscreteSystem{T}}) where {T}
S = promote_type(_parametric_type.(vers)...)
length(vers) > 1 || error("More than one DiscreteSystem necessary to use VersionedSystem")
all(x -> x.id == vers[1].id, vers) || error("Versions do not share ID")
all(x -> x.n == vers[1].n, vers) || error("Versions do not share state-space dimensions")
all(x -> x.r == vers[1].r, vers) || error("Versions do not share input dimensions")
all(x -> x.p == vers[1].p, vers) || error("Versions do not share output dimensions")
all(x -> _to_matrix(S, x.Qc) == _to_matrix(S, vers[1].Qc), vers) || error("Qc is defined in continuous time and cannot have multiple versions")
new{S}(vers)
end
end
#= Override get function for structs of type VersionedSystem =#
function Base.getproperty(v::VersionedSystem{T}, s::Symbol) where {T}
if s === :C || s === :Qc
return _to_matrix(T, getproperty(v.versions[1], s))
elseif s === :n || s === :r || s === :p || s === :id
return getproperty(v.versions[1], s)
end # if
return getfield(v, s)
end # function
#= Internal ReducedSystem struct =#
mutable struct ReducedSystem{T}
n::Integer
Ac::Matrix{T}
Rc::Matrix{T}
Qc::Matrix{T}
indices::Vector{Integer}
# Submatrices used in calcC2D
M112::Vector{Matrix{T}}
M122::Vector{Matrix{T}}
M213::Vector{Matrix{T}}
M223::Vector{Matrix{T}}
# Sanity checks
maxAbsEig::Real
Mnorm::T
# Matrix buffers to reduce allocation time
bufAd::Matrix{T}
bufRd::Matrix{T}
bufQd::Matrix{T}
bufl::Matrix{T} # Large buffer
# Commonly used matrices
Id::Matrix{T} # Large identity matrix
Zl::Matrix{T} # Large zero matrix
function ReducedSystem(Ac::Matrix{T},
Rc::Matrix{T},
Qc::Matrix{T}) where {T}
n = size(Ac, 1)
# Find all closed-loop states which are continuous
mask = iszero.(Ac) .& iszero.(Rc) .& iszero.(Qc)
indices = filter(!iszero, (vec(any(!, mask, dims=1)) .| vec(any(!, mask, dims=2))) .* (1:n))
# Reduce the continuous matrices
reducedAc = Ac[indices,indices]
reducedRc = Rc[indices,indices]
reducedQc = Qc[indices,indices]
# Create buffers for reduced discretised continuous system
bufAd = Matrix{T}(undef, n, n)
bufRd = Matrix{T}(undef, n, n)
bufQd = Matrix{T}(undef, n, n)
M122 = [reducedAc^k for k in 0:1]
M112 = [zero(reducedQc), reducedQc]
M223 = [zero(reducedRc), reducedRc']
M213 = [zero(reducedRc), zero(reducedRc)]
for k in 2:18
c = 1.0/k
push!(M213, c*(-reducedAc * last(M213) + last(M223))) # + zero matrix
push!(M223, c*(-reducedAc * last(M223) + reducedRc'* last(M122)'))
push!(M112, c*(-reducedAc'* last(M112) + reducedQc * last(M122)))
push!(M122, c*( reducedAc * last(M122)))
end
# Norm used for scaling and squaring
Meig = maximum(abs.(eigvalsCheck(reducedAc))) # TODO Remove eigvalsCheck and handle some other way...
l = size(reducedAc, 1)
M = zeros(T, 2 .* size(reducedAc))
M[1:l, 1:l] = -reducedAc'
M[1:l, l+1:2*l] = reducedQc
M[l+1:2*l, l+1:2*l] = reducedAc
M1norm = norm(M, 1)
Id = Matrix{T}(I, size(reducedAc))
M = zeros(T, 3 .* size(reducedAc))
M[1:l, 1:l] = M[l+1:2*l, l+1:2*l] = -reducedAc
M[2*l+1:3*l, 2*l+1:3*l] = reducedAc'
M[1:l, l+1:2*l] = Id
M[l+1:2*l, 2*l+1:3*l] = reducedRc'
M2norm = norm(M, 1)
return new{T}(n, reducedAc, reducedRc, reducedQc, indices,
M112, M122, M213, M223,
Meig, max(M1norm, M2norm),
bufAd, bufRd, bufQd, zeros(T, n, n),
Matrix{T}(I, n, n), zeros(T, n, n))
end
end
"""
JitterTimeSystem(systems::Vector{LinearSystem})
Initialize a new JitterTime system.
"""
mutable struct JitterTimeSystem{T} <: JTSystem{T}
systems::Vector{LinearSystem}
idtoindex::Dict{Integer, Integer}
J::T
dJdt::T
Tsim::T
Ad::Vector{Vector{Matrix{T}}} # TODO: Change this complex structure to individual structs instead
Rd::Vector{Vector{Matrix{T}}} # TODO: Change this complex structure to individual structs instead
m::Vector{T}
P::Matrix{T}
reduced::ReducedSystem{T}
# Implicit Constructor
function JitterTimeSystem(T, systems::Vector{S}) where {S <: LinearSystem}
totstates::Int64 = sum(sys.n for sys in systems)
Ad::Vector{Vector{Matrix{T}}} = [Matrix{T}[] for _ in systems]
Rd::Vector{Vector{Matrix{T}}} = [Matrix{T}[] for _ in systems]
P = zeros(T, totstates, totstates)
m = zeros(T, totstates)
return new{T}(systems, Dict{Integer, Integer}(), T(0), T(0), T(0), Ad, Rd, m, P)
end # Constructor
end
JitterTimeSystem(systems::Vector{S}) where {S <: LinearSystem} = JitterTimeSystem(promote_type(_parametric_type.(systems)...), systems)
#= Override get function for structs of type JitterTimeSystem =#
function Base.getproperty(sys::JitterTimeSystem, s::Symbol)
if hasproperty(sys, s)
return getfield(sys, s)
end # if
return getproperty(sys.reduced, s)
end # function
#= Override set function for structs of type JitterTimeSystem =#
function Base.setproperty!(sys::JitterTimeSystem, s::Symbol, x)
if hasproperty(sys, s)
return setfield!(sys, s, x)
end # if
return setproperty!(sys.reduced, s, x)
end # function
"""
PeriodicJitterTimeSystem(N::JTSystem)
Initialize a new Periodic JitterTime system.
"""
mutable struct PeriodicJitterTimeSystem{T} <: JTSystem{T}
jtsys::JTSystem{T}
periodicAnalysis::Bool
Atot::Matrix{T}
Rtot::Matrix{T}
dtot::Matrix{T} # TODO: What is this?
Pper::Matrix{T}
mper::Matrix{T} # TODO: What is this?
# Implicit Constructor
function PeriodicJitterTimeSystem(jtsys::S) where {S <: JTSystem}
        !(jtsys isa PeriodicJitterTimeSystem) || error("Unnecessary wrapping of PeriodicJitterTimeSystem")
T = _parametric_type(jtsys)
Atot = Matrix{T}(I, size(jtsys.P))
Rtot = zeros(T, size(jtsys.P))
dtot = zeros(T, size(jtsys.P, 1), 1)
Pper = zero(Rtot)
mper = zero(dtot)
new{T}(jtsys, false, Atot, Rtot, dtot, Pper, mper)
end
end
#= Override get function for structs of type PeriodicJitterTimeSystem =#
function Base.getproperty(sys::PeriodicJitterTimeSystem, s::Symbol)
if hasproperty(sys, s)
return getfield(sys, s)
end # if
return getproperty(sys.jtsys, s)
end # function
#= Override set function for structs of type PeriodicJitterTimeSystem =#
function Base.setproperty!(sys::PeriodicJitterTimeSystem, s::Symbol, x)
if hasproperty(sys, s)
return setfield!(sys, s, x)
end # if
return setproperty!(sys.jtsys, s, x)
end # function
"""
FixedIntervalJitterTimeSystem(N::JTSystem, h::Real)
Initialize a new JitterTime system with fixed activation interval h.
"""
mutable struct FixedIntervalJitterTimeSystem{T} <: JTSystem{T}
jtsys::JTSystem{T} # TODO: Should this be of type JTSystem{T}?
h::Real
FIAd::Matrix{T}
FIRd::Matrix{T}
FIQd::Matrix{T}
FIQconst::T
function FixedIntervalJitterTimeSystem(jtsys::S, h::Real) where {S <: JTSystem}
h > 0 || error("h must be positive!")
T = _parametric_type(jtsys)
tmp_sys = deepcopy(jtsys) # create deepcopy to calculate dynamic on
calcDynamics!(tmp_sys)
(Ad, Rd, Qd, Qconst) = calcC2D(tmp_sys.reduced, h) # TODO: This is bad practice... We assume the jtsys has had its dynamics calculated before hand (even though we never do it elsewhere)
new{T}(jtsys, h, Ad, Rd, Qd, Qconst)
end
end
#= Override get function for structs of type FixedIntervalJitterTimeSystem =#
function Base.getproperty(sys::FixedIntervalJitterTimeSystem, s::Symbol)
if hasproperty(sys, s)
return getfield(sys, s)
end # if
return getproperty(sys.jtsys, s)
end # function
#= Override set function for structs of type FixedIntervalJitterTimeSystem =#
function Base.setproperty!(sys::FixedIntervalJitterTimeSystem, s::Symbol, x)
if hasproperty(sys, s)
return setfield!(sys, s, x)
end # if
return setproperty!(sys.jtsys, s, x)
end # function
| JitterTime | https://github.com/X-N-C/JitterTime.jl.git |
|
[
"MIT"
] | 0.1.1 | cd42819cf623bf7b4a3a251c85f27ed74bd1e1d8 | code | 4283 | """
    calcC2D(N::ReducedSystem, h)
Calculate the discrete-time (ZOH) version of the continuous system
```
xdot = a*x + w
```
where the incremental variance of `w` is `r`. The cost of the system is
```
J = integral_0^h (x'*q*x) dt.
```
The resulting discrete-time system is
```
x(n+1) = Phi*x(n) + e(n)
```
where the variance of `e(n)` is `R`, and the cost is
```
J = x'*Q*x + Qconst.
```
"""
function calcC2D(N::ReducedSystem, dt::Real)
copy!(N.bufAd, N.Id)
copy!(N.bufRd, N.Zl)
copy!(N.bufQd, N.Zl)
(N.bufAd[N.indices, N.indices],
N.bufRd[N.indices, N.indices],
N.bufQd[N.indices, N.indices],
Qconst) = _calcC2D(N, dt)
return (N.bufAd, N.bufRd, N.bufQd, Qconst)
end
function _calcC2D(N::ReducedSystem, dt::Real)
if N.Mnorm * dt > 1.0 || N.maxAbsEig * dt > 4.0
(Phi, R, Q, Qconst) = _calcC2D(N, dt/2)
Qconst = 2*Qconst + tr(Q * R)
Q .+= Phi' * Q * Phi
R .+= Phi * R * Phi'
Phi .= Phi*Phi
return Phi, R, Q, Qconst
end
phi12 = jitterExp(N.M112, dt)
phi22 = jitterExp(N.M122, dt)
phi13 = jitterExp(N.M213, dt)
phi23 = jitterExp(N.M223, dt)
# Q = (Q+Q')*0.5
Q = phi22' * phi12
lmul!(0.5, Q)
Q .+= Q'
# R = (R+R')*0.5
R = phi22 * phi23 #phi33'*phi23
lmul!(0.5, R)
R .+= R'
Qconst = tr(phi22 * phi13 * N.Qc)
return (phi22, R, Q, Qconst)
end
# Theta from: Computing the Matrix Exponential with an Optimized Taylor Polynomial Approximation
function jitterExp(M::Vector{Matrix{T}}, dt::Real) where {T}
return jitterTaylor(M, dt, 18)
end
function jitterTaylor(M::Vector{Matrix{T}}, dt::Real, m::Int64) where {T}
h = one(dt)
p = zero(M[1])
for k in 1:m+1
#p .+= h .* M[k]
axpy!(h, M[k], p)
h *= dt
end
return p
end
######################
### TRACE OVERLOAD ###
######################
"""
tr(A::Matrix{T})
Return the trace of an arbitrary matrix `A`.
The trace sums all the elements on the diagonal of `A`.
"""
function tr(A::Matrix{T}) where {T}
return sum((A)[idx, idx] for idx in 1:size(A, 1))
end # function
######################
## EIGVALS OVERLOAD ##
######################
""" Temporary eigenvalue check for matrix types who don't support LinearAlgebra.eigvals """
function eigvalsCheck(A::Matrix{T}) where {T}
if isdefined(first(A), :value)
return eigvals(getproperty.(A, :value))
else
return eigvals(A)
end
end # function
######################
#### FREXP CHECK #####
######################
""" Temporary function for matrix types who don't support frexp """
function frexpCheck(A)
if isdefined(first(A), :value)
return frexp(getproperty.(A, :value))
else
return frexp(A)
end
end # function
######################
# DISCRETE LYAPUNOV ##
######################
"""
dlyap(A::Matrix{T}, Q)
Compute the solution `X` to the discrete Lyapunov equation
```
AXA' - X + Q = 0.
```
"""
function dlyap(A::Matrix{T}, Q) where {T}
lhs = kron(A, conj(A))
lhs = I - lhs
x = lhs\reshape(Q, prod(size(Q)), 1)
return reshape(x, size(Q))
end
######################
### AUX MATRIX MUL ###
######################
"""
_mat_mul!(A, B, C, Z)
Compute the product `A*B*C` and store the result in `B`, using `Z` as a scratch buffer.
"""
function _mat_mul!(A::Matrix{T}, B::Matrix{T}, C::Matrix{T}, Z::Matrix{T}) where {T}
mul!(Z, B, C)
mul!(B, A, Z)
end
function _mat_mul!(A::Adjoint{T, Matrix{T}}, B::Matrix{T}, C::Matrix{T}, Z::Matrix{T}) where {T}
mul!(Z, B, C)
mul!(B, A, Z)
end
function _mat_mul!(A::Matrix{T}, B::Matrix{T}, C::Adjoint{T, Matrix{T}}, Z::Matrix{T}) where {T}
mul!(Z, B, C)
mul!(B, A, Z)
end
"""
_mat_mul_add!(A, B, C, D, Z)
Compute `A*B*C + D` and store the result in `B`, using `Z` as a scratch buffer.
"""
function _mat_mul_add!(A::Matrix{T}, B::Matrix{T}, C::Matrix{T}, D::Matrix{T}, Z::Matrix{T}) where {T}
_mat_mul!(A, B, C, Z)
axpy!(1, D, B)
end
function _mat_mul_add!(A::Adjoint{T, Matrix{T}}, B::Matrix{T}, C::Matrix{T}, D::Matrix{T}, Z::Matrix{T}) where {T}
_mat_mul!(A, B, C, Z)
axpy!(1, D, B)
end
function _mat_mul_add!(A::Matrix{T}, B::Matrix{T}, C::Adjoint{T, Matrix{T}}, D::Matrix{T}, Z::Matrix{T}) where {T}
_mat_mul!(A, B, C, Z)
axpy!(1, D, B)
end
| JitterTime | https://github.com/X-N-C/JitterTime.jl.git |
|
[
"MIT"
] | 0.1.1 | cd42819cf623bf7b4a3a251c85f27ed74bd1e1d8 | code | 854 | module ContinuousTests
using Test
using JitterTime
@testset "Continuous Tests" begin
A1 = [-1 1; 0 -1]
B1 = [0; 1]
C1 = [1. 0]
D1 = 0
A2 = 0.
B2 = 1
C2 = -1
D2 = 0
R1 = B1*B1'
R2 = 1.
Q1 = C1'*C1
Q2 = 1.
Q2 = zeros(Float64, 2, 2)
plant = ContinuousSystem(1, A1, B1, C1, D1, 2, R1, [1. 0 0; 0 1 0; 0 0 1])
controller = ContinuousSystem(2, A2, B2, C2, D2, 1, R2, Q2)
N = JitterTimeSystem([plant, controller])
calcDynamics!(N)
passTime!(N, 1000)
@test isapprox(N.dJdt, 7.47)
@test isapprox(N.J, 7470.)
@test all(isapprox.(N.Rc, [0 0 0; 0 1 0; 0 0 1]))
@test all(isapprox.(N.Ac, [-1 1 0; 0 -1 -1; 1 0 0]))
@test all(isapprox.(N.Qc, [1 0 0; 0 1 0; 0 0 1]))
@test all(isapprox.(N.P, [1.5 1.5 -0.5; 1.5 2.5 -2.0; -0.5 -2 3.5]))
end # testset
end # module
| JitterTime | https://github.com/X-N-C/JitterTime.jl.git |
|
[
"MIT"
] | 0.1.1 | cd42819cf623bf7b4a3a251c85f27ed74bd1e1d8 | code | 955 | module DiscreteTests
using Test
using JitterTime
@testset "Discrete Tests" begin
A1 = [-1 1; 0 -1]
B1 = [0; 1]
C1 = [1 0]
D1 = 0
A2 = 0
B2 = 1
C2 = -1
D2 = 0
R1 = [0 0 0; 0 1 0; 0 0 1]
R2 = [0 0; 0 0.1]
Q1 = [1 0 0; 0 2 3; 0 3 0] # (n+p)=⍴Qd
Q2 = [0 0; 0 1]
plant = DiscreteSystem(1,A1,B1,C1,D1,2,R1,Q1)
controller = DiscreteSystem(2,A2,B2,C2,D2,1,R2,Q2)
N = JitterTimeSystem([plant, controller])
calcDynamics!(N)
execSys!(N,1)
execSys!(N,2)
execSys!(N,2)
execSys!(N,1)
execSys!(N,2)
@test isapprox(N.dJdt, 0.0)
@test isapprox(N.J, 0.0)
@test all(isapprox.(N.Rc, zeros(eltype(N.Rc),size(N.Rc))))
@test all(isapprox.(N.Ac, zeros(eltype(N.Ac),size(N.Ac))))
@test all(isapprox.(N.Qc, [1. 0 0 0; 0 2 3 0; 0 3 0 0; 0 0 0 1]))
@test all(isapprox.(N.P, [1 -1 0 0 0; -1 3.1 0 0 1; 0 0 1 1 0; 0 0 1 1 0; 0 1 0 0 1.1]))
end # testset
end # module
| JitterTime | https://github.com/X-N-C/JitterTime.jl.git |
|
[
"MIT"
] | 0.1.1 | cd42819cf623bf7b4a3a251c85f27ed74bd1e1d8 | code | 4544 | module Example1Tests
using Test
using JitterTime
@testset "Example 1" begin
h = 1
A1 = 0
B1 = 1
C1 = 1
D1 = 0
R1 = 1
Q1 = [1 0; 0 0] #Punish only the state
K = -(3+sqrt(3))/(2+sqrt(3))/h
pvec = [0, .100000000000000, 0.200000000000000, 0.300000000000000, 0.400000000000000, 0.500000000000000, 0.600000000000000, 0.700000000000000, 0.800000000000000, 0.900000000000000, 1.000000000000000, 1.000000000000000, 1.100000000000000, 1.200000000000000, 1.300000000000000, 1.400000000000000, 1.500000000000000, 1.600000000000000, 1.700000000000000, 1.800000000000000, 1.900000000000001, 2.000000000000000, 2.000000000000000, 2.100000000000001, 2.200000000000001, 2.300000000000001, 2.400000000000001, 2.500000000000001, 2.600000000000001, 2.700000000000001, 2.800000000000001, 2.900000000000001, 3.000000000000001, 3.000000000000001, 2.387461339178930, 1.871384387633062, 1.451769145362399, 1.128615612366940, 0.901923788646685, 0.771693674201634, 0.737925269031787, 0.800618573137145, 0.959773586517707, 1.215390309173473, 1.215390309173473, 1.026719448082348, 0.877128129211021, 0.766616352559490, 0.695184118127758, 0.662831425915822, 0.669558275923684, 0.715364668151343, 0.800250602598800, 0.924216079266054, 1.087261098153105, 1.087261098153105, 0.929022575872818, 0.805743741577959, 0.717424595268528, 0.664065136944525, 0.645665366605950, 0.662225284252803, 0.713744889885084, 0.800224183502793, 0.921663165105930, 1.078061834694495]
Jvec = [0, 0.005000000000000, 0.020000000000000, 0.045000000000000, 0.080000000000000, 0.125000000000000, 0.180000000000000, 0.245000000000000, 0.320000000000000, 0.405000000000000, 0.500000000000000, 0.500000000000000, 0.605000000000000, 0.720000000000000, 0.845000000000000, 0.980000000000000, 1.125000000000000, 1.280000000000000, 1.445000000000000, 1.620000000000000, 1.805000000000000, 2.000000000000000, 2.000000000000000, 2.205000000000000, 2.420000000000000, 2.645000000000000, 2.880000000000000, 3.125000000000000, 3.380000000000000, 3.645000000000000, 3.920000000000000, 4.205000000000000, 4.500000000000000, 4.500000000000000, 4.768569219381654, 4.980707658144960, 5.146061487217439, 5.274276877526613, 5.375000000000001, 5.457877025565123, 5.532554125149501, 5.608677469680654, 5.695893230086103, 5.803847577293369, 5.803847577293369, 5.915627402304328, 6.010494118317165, 6.092355679553859, 6.165120040236389, 6.232695154586737, 6.298988976826880, 6.367909461178800, 6.443364561864476, 6.529262233105887, 6.629510429125013, 6.629510429125013, 6.730033282093097, 6.816480267232424, 6.892347353341536, 6.961130509218977, 7.026325703663288, 7.091428905473014, 7.159936083446697, 7.235343206382879, 7.321146243080103, 7.420841162336913]
pvecjl = Float64[]
Jvecjl = Float64[]
plant = ContinuousSystem(10,A1,B1,C1,D1,30,R1,Q1)
sampler = DiscreteSystem(20,1,10)
controller = DiscreteSystem(30,K,20)
N = JitterTimeSystem([plant, sampler, controller])
calcDynamics!(N)
Nsteps = 6
dt = h/10.0
for k = 1:Nsteps
for j = 1:10
push!(pvecjl, N.P[1, 1])
push!(Jvecjl, N.J)
passTime!(N,dt)
end
push!(pvecjl, N.P[1, 1])
push!(Jvecjl, N.J)
if k >= 3
execSys!(N,20)
execSys!(N,30)
end
end
@test isapprox(N.dJdt, 0.996949192568094)
@test isapprox(N.J, 7.420841162336913)
#@test all(isapprox.(N.Rc, [1. 0 0; 0 0 0; 0 0 0]))
#@test all(isapprox.(N.Ac, [0. 0 1; 0 0 0; 0 0 0]))
#@test all(isapprox.(N.Qc, [1. 0 0; 0 0 0; 0 0 0]))
@test all(isapprox.(N.P, [1.078061834694495 1.078061834694495 -1.366927632691700; 1.078061834694495 1.078061834694495 -1.366927632691700; -1.366927632691700 -1.366927632691700 1.733194787983227]))
@test all(isapprox.(pvecjl, pvec))
@test all(isapprox.(Jvecjl, Jvec))
reset!(N)
for k = 1:Nsteps
for j = 1:10
passTime!(N,dt)
end
if k >= 3
execSys!(N,20)
execSys!(N,30)
end
end
@test isapprox(N.dJdt, 0.996949192568094)
@test isapprox(N.J, 7.420841162336913)
#@test all(isapprox.(N.Rc, [1. 0 0; 0 0 0; 0 0 0]))
#@test all(isapprox.(N.Ac, [0. 0 1; 0 0 0; 0 0 0]))
#@test all(isapprox.(N.Qc, [1. 0 0; 0 0 0; 0 0 0]))
@test all(isapprox.(N.P, [1.078061834694495 1.078061834694495 -1.366927632691700; 1.078061834694495 1.078061834694495 -1.366927632691700; -1.366927632691700 -1.366927632691700 1.733194787983227]))
end # testset
end # module
| JitterTime | https://github.com/X-N-C/JitterTime.jl.git |
|
[
"MIT"
] | 0.1.1 | cd42819cf623bf7b4a3a251c85f27ed74bd1e1d8 | code | 4123 | module Example2Tests
using Test
using JitterTime
@testset "Example 2" begin #Instead of an aperiodic sampler, the extra sampler will be executed every 0.5 timeunit
A1 = [0 1; 1 0]
B1 = [0; 1]
C1 = [1 0; 0 1]
D1 = [0; 0]
Qc = [1 0 0; 0 .1 0; 0 0 .1]
R1c = B1*B1'
plant = ContinuousSystem(1,A1,B1,C1,D1,5,R1c,Qc)
S1 = [1 0]
R21 = 0.1
sampler1 = DiscreteSystem(2,S1,1,R21)
R22 = 0.01
S2 = [0 1]
sampler2 = DiscreteSystem(3,S2,1,R22)
AO1 = [.5309 .079 0; -.8673 .8597 0; 0 0 0];
BO1 = [.0059 .4750 0; .1392 1.0065 0; 1 0 0];
CO1 = [.5309 .079 0; -.8673 .8597 0; 0 0 0];
DO1 = [.0059 .4750 0; .1392 1.0065 0; 1 0 0];
obs1 = DiscreteSystem(4,AO1,BO1,CO1,DO1,[5, 2, 3])
AO2 = [ 1 -.3906 0; 0 .0163 0; 0 0 1];
BO2 = [0 0 .3906; 0 0 .9837; 0 0 0];
CO2 = [ 1 -.3906 0; 0 .0163 0; 0 0 1];
DO2 = [0 0 .3906; 0 0 .9837; 0 0 0];
obs2 = DiscreteSystem(4,AO2,BO2,CO2,DO2,[5, 2, 3])
obs = VersionedSystem([obs1,obs2])
L = [3.633 2.7434 0]
feedback = DiscreteSystem(5,-L,4)
N = JitterTimeSystem([plant, sampler1, sampler2, obs, feedback])
calcDynamics!(N)
dt = 0.05
for t = 1:240
for k = 1:10
passTime!(N,dt/10)
end
if 0 == mod(t,10)
execSys!(N,3)
execSys!(N,4,2)
end
if 0 == mod(t,3)
execSys!(N,2)
execSys!(N,4,1)
execSys!(N,5)
end
end
@test isapprox(N.dJdt, 0.860680772466343)
@test isapprox(N.J, 12.574852494566098)
R = zeros(3,3)
R[2,2] = 1
@test all(isapprox.(N.Rc, R))
A = zeros(3,3)
A[1,2]=A[2,1]=A[2,3] = 1
@test all(isapprox.(N.Ac, A))
Q = zeros(3,3)
Q[1,1] = 1
Q[2,2] = Q[3,3] = 0.1
@test all(isapprox.(N.Qc, Q))
P = [ 0.334482211088805 0.013682570982143 0.334482211088805 0.013682570982143 0.346026331881820 -0.067470502293183 -0.736229907436902 0.346026331881820 -0.067470502293183 -0.736229907436902 -1.072015087735536; 0.013682570982143 0.736398452636592 0.013682570982143 0.736398452636592 0.089642717236325 0.618518501967293 0.144408255208566 0.089642717236325 0.618518501967293 0.144408255208566 -2.022515650016641; 0.334482211088805 0.013682570982143 0.434482211088805 0.013682570982143 0.393526331881820 0.033179497706817 -0.736229907436902 0.393526331881820 0.033179497706817 -0.736229907436902 -1.520705797735536; 0.013682570982143 0.736398452636592 0.013682570982143 0.746398452636592 0.092493535636325 0.623587697067293 0.144408255208566 0.092493535636325 0.623587697067293 0.144408255208566 -2.046779503101181; 0.346026331881821 0.089642717236325 0.393526331881821 0.092493535636325 0.404746382499118 0.031761867747475 -0.643351469563386 0.404746382499118 0.031761867747475 -0.643351469563386 -1.557579115597717; -0.067470502293183 0.618518501967293 0.033179497706817 0.623587697067293 0.031761867747475 0.697213051450218 0.545336264308738 0.031761867747475 0.697213051450218 0.545336264308738 -2.028125150875103; -0.736229907436903 0.144408255208566 -0.736229907436903 0.144408255208566 -0.643351469563386 0.545336264308737 4.529499627421395 -0.643351469563386 0.545336264308737 4.529499627421395 0.841220381419191; 0.346026331881821 0.089642717236325 0.393526331881821 0.092493535636325 0.404746382499118 0.031761867747475 -0.643351469563386 0.404746382499118 0.031761867747475 -0.643351469563386 -1.557579115597717; -0.067470502293183 0.618518501967294 0.033179497706817 0.623587697067293 0.031761867747475 0.697213051450218 0.545336264308738 0.031761867747475 0.697213051450218 0.545336264308738 -2.028125150875104; -0.736229907436903 0.144408255208566 -0.736229907436903 0.144408255208566 -0.643351469563386 0.545336264308737 4.529499627421395 -0.643351469563386 0.545336264308737 4.529499627421395 0.841220381419191; -1.072015087735536 -2.022515650016641 -1.520705797735536 -2.046779503101181 -1.557579115597717 -2.028125150875103 0.841220381419190 -1.557579115597717 -2.028125150875103 0.841220381419190 11.222643465877262]
@test all(isapprox.(N.P, P))
end # testset
end # module
| JitterTime | https://github.com/X-N-C/JitterTime.jl.git |
|
[
"MIT"
] | 0.1.1 | cd42819cf623bf7b4a3a251c85f27ed74bd1e1d8 | code | 252 | module RunTests
println("Running tests...")
include("continuous_test.jl")
include("discrete_test.jl")
include("example_1_test.jl")
include("example_2_test.jl")
include("test_id.jl")
include("test_version.jl")
include("test_periodic.jl")
end # module
| JitterTime | https://github.com/X-N-C/JitterTime.jl.git |
|
[
"MIT"
] | 0.1.1 | cd42819cf623bf7b4a3a251c85f27ed74bd1e1d8 | code | 4633 | module TestID
using Test
using JitterTime
function aux_cont(ids::Vector)
A1 = [-1 1; 0 -1]
B1 = [0; 1]
C1 = [1. 0]
D1 = 0
A2 = 0.
B2 = 1
C2 = -1
D2 = 0
R1 = B1*B1'
R2 = 1.
Q1 = C1'*C1
Q2 = 1.
Q2 = zeros(2, 2)
plant = ContinuousSystem(ids[1], A1, B1, C1, D1, ids[2], R1, [1. 0 0; 0 1 0; 0 0 1])
controller = ContinuousSystem(ids[2], A2, B2, C2, D2, ids[1], R2, Q2)
N = JitterTimeSystem([plant, controller])
calcDynamics!(N)
passTime!(N, 1000.)
M = JitterTimeSystem([controller, plant])
calcDynamics!(M)
passTime!(M, 1000.)
Nindices = Vector{UnitRange}(undef, length(N.systems))
Mindices = Vector{UnitRange}(undef, length(M.systems))
Ntotstates = 0
Mtotstates = 0
for i in 1:length(N.systems)
# TODO: Add "stateindex"?
Nn = N.systems[i].n
Mn = M.systems[i].n
Nindices[i] = UnitRange(Ntotstates+1, Ntotstates+Nn)
Mindices[i] = UnitRange(Mtotstates+1, Mtotstates+Mn)
Ntotstates += Nn
Mtotstates += Mn
end
return N, Nindices, M, Mindices
end # function
function aux_disc(ids::Vector)
A1 = [-1 1; 0 -1]
B1 = [0; 1]
C1 = [1 0]
D1 = 0
A2 = 0
B2 = 1
C2 = -1
D2 = 0
R1 = [0 0 0; 0 1 0; 0 0 1]
R2 = [0 0; 0 0.1]
Q1 = [1 0 0; 0 2 3; 0 3 0] # (n+p)=⍴Qd
Q2 = [0 0; 0 1]
plant = DiscreteSystem(ids[1],A1,B1,C1,D1,ids[2],R1,Q1)
controller = DiscreteSystem(ids[2],A2,B2,C2,D2,ids[1],R2,Q2)
N = JitterTimeSystem([plant, controller])
calcDynamics!(N)
execSys!(N,ids[1])
execSys!(N,ids[2])
execSys!(N,ids[2])
execSys!(N,ids[1])
execSys!(N,ids[2])
M = JitterTimeSystem([controller, plant])
calcDynamics!(M)
execSys!(M,ids[1])
execSys!(M,ids[2])
execSys!(M,ids[2])
execSys!(M,ids[1])
execSys!(M,ids[2])
Nindices = Vector{UnitRange}(undef, length(N.systems))
Mindices = Vector{UnitRange}(undef, length(M.systems))
Ntotstates = 0
Mtotstates = 0
for i in 1:length(N.systems)
# TODO: Add "stateindex"?
Nn = N.systems[i].n
Mn = M.systems[i].n
Nindices[i] = UnitRange(Ntotstates+1, Ntotstates+Nn)
Mindices[i] = UnitRange(Mtotstates+1, Mtotstates+Mn)
Ntotstates += Nn
Mtotstates += Mn
end
return N, Nindices, M, Mindices
end # function
@testset "ID test - Pure Continuous (Simple IDs)" begin
N, Nindices, M, Mindices = aux_cont([1, 2])
@test isapprox(N.dJdt, M.dJdt)
@test isapprox(N.J, M.J)
@test all(isapprox.(N.Ac[Nindices[1], Nindices[1]], M.Ac[Mindices[2], Mindices[2]]))
@test all(isapprox.(N.Rc[Nindices[1], Nindices[1]], M.Rc[Mindices[2], Mindices[2]]))
@test all(isapprox.(N.Qc[Nindices[1], Nindices[1]], M.Qc[Mindices[2], Mindices[2]]))
@test all(isapprox.(N.P[Nindices[1], Nindices[1]], M.P[Mindices[2], Mindices[2]]))
end # testset
@testset "ID test - Pure Continuous (Complex IDs)" begin
N, Nindices, M, Mindices = aux_cont([49_463, 12_289])
@test isapprox(N.dJdt, M.dJdt)
@test isapprox(N.J, M.J)
@test all(isapprox.(N.Ac[Nindices[1], Nindices[1]], M.Ac[Mindices[2], Mindices[2]]))
@test all(isapprox.(N.Rc[Nindices[1], Nindices[1]], M.Rc[Mindices[2], Mindices[2]]))
@test all(isapprox.(N.Qc[Nindices[1], Nindices[1]], M.Qc[Mindices[2], Mindices[2]]))
@test all(isapprox.(N.P[Nindices[1], Nindices[1]], M.P[Mindices[2], Mindices[2]]))
end # testset
@testset "ID test - Pure Discrete (Simple IDs)" begin
N, Nindices, M, Mindices = aux_cont([1, 2])
@test isapprox(N.dJdt, M.dJdt)
@test isapprox(N.J, M.J)
@test all(isapprox.(N.Ac[Nindices[1], Nindices[1]], M.Ac[Mindices[2], Mindices[2]]))
@test all(isapprox.(N.Rc[Nindices[1], Nindices[1]], M.Rc[Mindices[2], Mindices[2]]))
@test all(isapprox.(N.Qc[Nindices[1], Nindices[1]], M.Qc[Mindices[2], Mindices[2]]))
@test all(isapprox.(N.P[Nindices[1], Nindices[1]], M.P[Mindices[2], Mindices[2]]))
end # testset
@testset "ID test - Pure Discrete (Complex IDs)" begin
N, Nindices, M, Mindices = aux_cont([26_561, 64_591])
@test isapprox(N.dJdt, M.dJdt)
@test isapprox(N.J, M.J)
@test all(isapprox.(N.Ac[Nindices[1], Nindices[1]], M.Ac[Mindices[2], Mindices[2]]))
@test all(isapprox.(N.Rc[Nindices[1], Nindices[1]], M.Rc[Mindices[2], Mindices[2]]))
@test all(isapprox.(N.Qc[Nindices[1], Nindices[1]], M.Qc[Mindices[2], Mindices[2]]))
@test all(isapprox.(N.P[Nindices[1], Nindices[1]], M.P[Mindices[2], Mindices[2]]))
end # testset
end # module
| JitterTime | https://github.com/X-N-C/JitterTime.jl.git |
|
[
"MIT"
] | 0.1.1 | cd42819cf623bf7b4a3a251c85f27ed74bd1e1d8 | code | 2284 | module TestPeriodic
using JitterTime
using Test
@testset "Periodic Tests" begin
p_id = 10
s_id = 20
c_id = 30
h = 1
plant = ContinuousSystem(p_id, 0, 1, 1, 0, c_id, 1, [1 0; 0 0])
sampler = DiscreteSystem(s_id, 1, p_id)
ctrler = DiscreteSystem(c_id, -(3+sqrt(3))/(2+sqrt(3))/h, s_id)
N = JitterTimeSystem([plant, sampler, ctrler])
Np = PeriodicJitterTimeSystem(N)
@test_throws ErrorException PeriodicJitterTimeSystem(Np)
calcDynamics!(Np)
beginPeriodicAnalysis!(Np)
for _ in 1:10
passTime!(Np, 0.1)
end # for
execSys!(Np, s_id)
execSys!(Np, c_id)
endPeriodicAnalysis!(Np)
@test isapprox(Np.J, 0.5)
@test isapprox(Np.dJdt, 0.95)
@test all(isapprox.(Np.Ac, [0 1; 0 0]))
@test all(isapprox.(Np.Rc, [1 0; 0 0]))
@test all(isapprox.(Np.Pper, [1.077350269189626 1.077350269189626 -1.366025403784438; 1.077350269189626 1.077350269189626 -1.366025403784438; -1.366025403784438 -1.366025403784438 1.732050807568876]))
@test all(isapprox.(Np.mper, [0; 0; 0]))
@test all(isapprox.(Np.Atot, [1.0 0 1.0; 1.0 0 1.0; -1.267949192431123 0 -1.267949192431122]))
@test all(isapprox.(Np.Rtot, [1.0 1.0 -1.267949192431122; 1.0 1.0 -1.267949192431122; -1.267949192431122 -1.267949192431122 1.607695154586736]))
@test all(isapprox.(Np.dtot, [0; 0; 0]))
reset!(Np)
beginPeriodicAnalysis!(Np)
for _ in 1:10
passTime!(Np, 0.1)
end # for
execSys!(Np, s_id)
execSys!(Np, c_id)
endPeriodicAnalysis!(Np)
@test isapprox(Np.J, 0.5)
@test isapprox(Np.dJdt, 0.95)
@test all(isapprox.(Np.Ac, [0 1; 0 0]))
@test all(isapprox.(Np.Rc, [1 0; 0 0]))
@test all(isapprox.(Np.Pper, [1.077350269189626 1.077350269189626 -1.366025403784438; 1.077350269189626 1.077350269189626 -1.366025403784438; -1.366025403784438 -1.366025403784438 1.732050807568876]))
@test all(isapprox.(Np.mper, [0; 0; 0]))
@test all(isapprox.(Np.Atot, [1.0 0 1.0; 1.0 0 1.0; -1.267949192431123 0 -1.267949192431122]))
@test all(isapprox.(Np.Rtot, [1.0 1.0 -1.267949192431122; 1.0 1.0 -1.267949192431122; -1.267949192431122 -1.267949192431122 1.607695154586736]))
@test all(isapprox.(Np.dtot, [0; 0; 0]))
end # testset
end # module
| JitterTime | https://github.com/X-N-C/JitterTime.jl.git |
|
[
"MIT"
] | 0.1.1 | cd42819cf623bf7b4a3a251c85f27ed74bd1e1d8 | code | 1278 | module TestVersion
using JitterTime
using Test
@testset "Test Versions" begin
h = 1.0
A1 = 0
B1 = 1
C1 = 1
D1 = 0
R1 = 1
Q1 = [1 0; 0 1]
D3 = -(3+sqrt(3))/(2+sqrt(3))/h
plant = ContinuousSystem(10,A1,B1,C1,D1,30,R1,Q1)
sampler = DiscreteSystem(20,1,10)
sampler2 = DiscreteSystem(20,0,10)
samplerver = VersionedSystem([sampler, sampler2])
controller = DiscreteSystem(30,D3,20)
N = JitterTimeSystem([plant, samplerver, controller])
calcDynamics!(N)
dt = h/10.0
for k = 1:6
for j = 1:10
passTime!(N,dt)
end
if k == 3
execSys!(N,20,1)
execSys!(N,30)
elseif k > 3
execSys!(N,20,2)
execSys!(N,30)
end
end
@test all(isapprox.(N.J, 15.057713659400530))
@test all(isapprox.(N.dJdt, 3.165390309173476))
@test all(isapprox.(N.Ac, [0 1; 0 0]))
@test all(isapprox.(N.P, [3.215390309173475 0 0; 0 0 0; 0 0 0]))
@test all(isapprox.(N.Qc, [1 0; 0 1]))
@test all(isapprox.(N.Rc, [1 0; 0 0]))
samp_idx = N.idtoindex[20]
@test all(isapprox.(N.Ad[samp_idx][1], [1 0 0; 1 0 0; 0 0 1]))
@test all(isapprox.(N.Ad[samp_idx][2], [1 0 0; 0 0 0; 0 0 1]))
end # testset
end # module
| JitterTime | https://github.com/X-N-C/JitterTime.jl.git |
|
[
"MIT"
] | 0.1.1 | cd42819cf623bf7b4a3a251c85f27ed74bd1e1d8 | docs | 300 | # JitterTime.jl
[CI](https://github.com/X-N-C/JitterTime.jl/actions)
[Codecov](https://codecov.io/gh/X-N-C/JitterTime.jl)
__TO BE WRITTEN SHORTLY__
| JitterTime | https://github.com/X-N-C/JitterTime.jl.git |
|
[
"MIT"
] | 0.2.0 | 71bc1e3ee8d345220ddaf300641340c24e3bf5d1 | code | 1960 | using Dagger, DaggerWebDash, TimespanLogging
function start_viz()
ctx = Dagger.Sch.eager_context()
ml = TimespanLogging.MultiEventLog()
## Add some logging events of interest
ml[:core] = TimespanLogging.Events.CoreMetrics()
ml[:id] = TimespanLogging.Events.IDMetrics()
ml[:timeline] = TimespanLogging.Events.TimelineMetrics()
# ...
# (Optional) Enable profile flamegraph generation with ProfileSVG
ml[:profile] = DaggerWebDash.ProfileMetrics()
ctx.profile = true
# Create a LogWindow; necessary for real-time event updates
lw = TimespanLogging.Events.LogWindow(20*10^9, :core)
ml.aggregators[:logwindow] = lw
# Create the D3Renderer server on port 8080
d3r = DaggerWebDash.D3Renderer(8080)
## Add some plots! Rendered top-down in order
# Show an overview of all generated events as a Gantt chart
push!(d3r, DaggerWebDash.GanttPlot(:core, :id, :esat, :psat; title="Overview"))
# Show various numerical events as line plots over time
push!(d3r, DaggerWebDash.LinePlot(:core, :wsat, "Worker Saturation", "Running Tasks"))
push!(d3r, DaggerWebDash.LinePlot(:core, :loadavg, "CPU Load Average", "Average Running Threads"))
push!(d3r, DaggerWebDash.LinePlot(:core, :bytes, "Allocated Bytes", "Bytes"))
push!(d3r, DaggerWebDash.LinePlot(:core, :mem, "Available Memory", "% Free"))
# Show a graph rendering of compute tasks and data movement between them
# Note: Profile events are ignored if absent from the log
push!(d3r, DaggerWebDash.GraphPlot(:core, :id, :timeline, :profile, "DAG"))
# TODO: Not yet functional
#push!(d3r, DaggerWebDash.ProfileViewer(:core, :profile, "Profile Viewer"))
# Add the D3Renderer as a consumer of special events generated by LogWindow
push!(lw.creation_handlers, d3r)
push!(lw.deletion_handlers, d3r)
# D3Renderer is also an aggregator
ml.aggregators[:d3r] = d3r
ctx.log_sink = ml
end
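# A usage note (sketch): the dashboard is normally enabled through the pipeline entry point,
# which calls `start_viz()` and serves the page at http://localhost:8080, e.g.
#   Waluigi.run_pipeline(some_job; visualizer=true)
# where `some_job` stands in for any instantiated AbstractJob.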
| Waluigi | https://github.com/mrufsvold/Waluigi.jl.git |
|
[
"MIT"
] | 0.2.0 | 71bc1e3ee8d345220ddaf300641340c24e3bf5d1 | code | 7242 | # This is the internal Job structure
# TODO save the file and line number for the job definition so we can give clear debugging information
abstract type AbstractJob end
mutable struct ScheduledJob{T<:AbstractTarget}
job_id::UInt64
target::T
data
function ScheduledJob(job_id, target, data)
returned_value_type = typeof(data)
expected_type = return_type(target)
if !(returned_value_type <: expected_type)
throw(ArgumentError("Type of data must be a subtype of the target return type"))
end
return new{typeof(target)}(job_id, target,data)
end
end
# This is what an executed Job will return
return_type(sj::ScheduledJob) = return_type(sj.target)
Base.convert(::Type{ScheduledJob}, ::Nothing) = ScheduledJob(zero(UInt64), NoTarget(), nothing)
# This is the interface for a Job. They dispatch on job type
get_dependencies(job) = nothing
get_target(job) = nothing
run_process(job, dependencies, target) = nothing
# Similar interface for ScheduledJob
get_dependencies(r::ScheduledJob) = r.dependencies
get_target(r::ScheduledJob) = r.target
get_result(t::Dagger.EagerThunk) = get_result(fetch(t))
get_result(j::ScheduledJob{T}) where {T} = j.data::return_type(T)
# These are the accepted versions of containers of jobs that the user can define for dependencies
const AcceptableDependencyContainers = Union{
Vector{<:AbstractJob},
AbstractDict{Symbol, <:AbstractJob},
}
"""
@Job(job_description)
Constructs a new job based on a description.
## Description Format
```julia
@Job begin
# Name sets the identifier of the job struct generated by the macro
name = MyNewJob
# Parameters are the input values for the job
parameters = (param1::String, param2::Int, param)
# Dependencies list jobs that should be inputs to this job (Optional)
# They can be created programmatically using parameters, and must return an
# <:AbstractJob, AbstractArray{<:AbstractJob}, or AbstractDict{Symbol, <:AbstractJob}
dependencies = [[SomeJob(i) for i in 1:param2]; AnotherJob("input")]
# Target is an output location to cache the result. If the target exists, the job will
# be skipped and the cached result will be returned (Optional).
target = FileTarget(joinpath(snf_path, "raw_tables", "\$month.csv"))
# The process function will be executed when the job is called.
# All parameters, `dependencies`, and `target` are defined in this scope.
process = begin
dep1_data = dependencies[1]
x = do_logic(dep1_data, param1)
write(target, x)
return x
end
end
```
"""
macro Job(job_description)
# This returns a Dict of job parameters and their values
job_features = extract_job_features(job_description)
job_name = job_features[:name]
if job_name == Expr(:block, :nothing)
return :(throw(ArgumentError("Job definitions require a `name` field but none was provided.")))
end
# Cleaning the parameters passed in
raw_parameters = job_features[:parameters]
# This is the list of parameters including type annotations if applicable
parameter_list = raw_parameters isa Symbol ? (raw_parameters,) : raw_parameters.args
# This is just the names
parameter_names = [arg isa Symbol ? arg : arg.args[1] for arg in parameter_list]
# get_dependencies needs to return an `AcceptableDependencyContainers` and target needs to return an <:AbstractTarget
# these functions append some protections and raise errors if the function returns an unexpected type
dependency_func = add_get_dep_return_type_protection(job_features[:dependencies])
target_func = add_get_target_return_type_protection(job_features[:target])
dependency_ex = unpack_input_function(:get_dependencies, job_name, parameter_names, dependency_func)
target_ex = unpack_input_function(:get_target, job_name, parameter_names, target_func)
process_ex = unpack_input_function(:run_process, job_name, parameter_names, job_features[:process], (:dependencies, :target))
struct_def = :(struct $job_name <: Waluigi.AbstractJob end)
push!(struct_def.args[3].args, parameter_list...)
# TODO: check if there is already a struct defined that is a complete match (name, fields, types)
# if there is, then just emit the functions, since the user is probably just trying to
# edit the implementation of a function
return quote
$(esc(struct_def))
$(esc(dependency_ex))
$(esc(target_ex))
$(esc(process_ex))
end
end
function extract_job_features(job_description)
job_features = Dict{Symbol,Any}()
for element in job_description.args
if element isa LineNumberNode
continue
end
feature_name = element.args[1]
if !(feature_name in (:name, :dependencies, :target, :process, :parameters))
error("Got feature name $feature_name. Expected one of :name, :dependencies, :target, :process, :parameters.")
end
job_features[feature_name] = element.args[2]
end
for feature in (:name, :dependencies, :target, :process)
if !(feature in keys(job_features))
job_features[feature] = Expr(:block, :nothing)
end
end
if !(:parameters in keys(job_features)) || job_features[:parameters] == :nothing
job_features[:parameters] = :(())
end
return job_features
end
function unpack_input_function(function_name, job_name, parameter_names, function_body, additional_inputs=())
return quote
function Waluigi.$function_name(job::$job_name, $(additional_inputs...))
let $((:($name = job.$name) for name in parameter_names)...)
$function_body
end
end
end
end
function add_get_dep_return_type_protection(func_block)
return quote
t = begin
$func_block
end
corrected_deps = if t isa Nothing
Waluigi.AbstractJob[]
elseif t isa Waluigi.AbstractJob
typeof(t)[t]
elseif t isa Waluigi.AcceptableDependencyContainers
t
elseif t isa Type && t <: Waluigi.AbstractJob
throw(ArgumentError("""The dependencies definition in $(typeof(job)) returned an AbstractJob type, \
but dependencies must be an instance of a job. Try calling the job like `$(t)(args...)`"""))
else
throw(ArgumentError("""The dependencies definition in $(typeof(job)) returned a $(typeof(t)) \
which is not one of the accepted return types. It must return one of the following: \
<: AbstractJob, AbstractArray{<:AbstractJob}, Dict{Symbol, <:AbstractJob}"""))
end
return corrected_deps
end
end
function add_get_target_return_type_protection(func_block)
return quote
t = begin
$func_block
end
corrected_target = if t isa Nothing
# TODO need to find a way to parameterize when no target is specified
Waluigi.NoTarget{Any}()
elseif t isa Waluigi.AbstractTarget
t
else
throw(ArgumentError("""The target definition in $(typeof(job)) returned a $(typeof(t)), \
but target must return `nothing` or `<:AbstractTarget`"""))
end
return corrected_target
end
end
| Waluigi | https://github.com/mrufsvold/Waluigi.jl.git |
|
[
"MIT"
] | 0.2.0 | 71bc1e3ee8d345220ddaf300641340c24e3bf5d1 | code | 7079 | """
run_pipeline(head_job, ignore_target=false; visualizer=false)
Given an instantiated Job, satisfy all of its dependencies recursively and return the result of
the final job. Set `ignore_target=true` to recompute results even when a job's target is already
complete, and `visualizer=true` to serve the DaggerWebDash dashboard while the pipeline runs.
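A usage sketch (`SayHelloWorld` here is the job defined in the README's quick example):
```julia
result = run_pipeline(SayHelloWorld())
get_result(result)
```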
"""
function run_pipeline(head_job, ignore_target=false; visualizer=false)
if visualizer
@info "Creating visualizer at http://localhost:8080/"
start_viz()
end
# jobs is a Dict of id => AbstractJob
# dependency_relations is a Set of Tuple{job id, dep id, satisfied}
# initial_ready_jobs holds the ids of jobs that are ready to run immediately
@info "Initiating the pipeline"
(jobs, dependency_relations, initial_ready_jobs) = get_dependency_details(head_job)
@info "Collected $(length(jobs)) jobs to complete. "
results = Dict{UInt64, Union{Nothing, Dagger.EagerThunk}}(
id => nothing
for id in initial_ready_jobs
)
job_id = first(initial_ready_jobs)
completed_all_jobs = false
while !completed_all_jobs
job_deps = [
results[dep_pair[2]]
for dep_pair in dependency_relations
if dep_pair[1] == job_id
]
@debug "Spawning $(jobs[job_id])"
results[job_id] = Dagger.@spawn (jobs[job_id])(job_id, ignore_target, job_deps...)
# Find upstream jobs and check if completing this job makes them ready for execution
upstream_jobs = dependency_relations |>
# get jobs that depend on the completed job
((dr) -> Iterators.filter((p) -> p[2] == job_id, dr)) .|>
# Set "satisfied" field to true
((p) -> (delete!(dependency_relations, p); p)) .|>
((p) -> (push!(dependency_relations, (p[1], p[2], true)); p[1]))
for upstream in upstream_jobs
is_unsatisfied_match_flag = Iterators.map(
(rel) -> rel[1] == upstream && !rel[3],
dependency_relations
)
upstream_ready = !any(is_unsatisfied_match_flag)
if upstream_ready
results[upstream] = nothing
end
end
job_id = findfirst(p -> p[2] === nothing, (p for p in pairs(results)))
if job_id === nothing
completed_all_jobs = true
end
end
results[hash(head_job)]
end
function get_dependency_details(head_job)
# The look up for the actual job objects
jobs = Dict{UInt64, AbstractJob}()
# Tracking if a job is ready to run
ready_jobs = Set{UInt64}()
dep_relations = Dict{Tuple{UInt64,UInt64,Bool}, Int}()
traverse_dependencies!(head_job, jobs, dep_relations, ready_jobs)
dep_relations = Set(keys(dep_relations))
return (jobs = jobs, dependency_relations = dep_relations, ready_jobs = ready_jobs)
end
function traverse_dependencies!(job, jobs, dep_relations, ready_jobs, depth = 1)
@debug "Maximum recursion depth is 100. Currently at depth $depth"
job_id = hash(job)
if depth > 100
error("Reached maximum depth. It's possible that there is a cycle in the dependencies but the parameters are different each time.")
end
# We need to get the dependencies as a vector (get values of dicts)
dep_container = get_dependencies(job)
dep_list = get_dependencies_list(dep_container)
# Jobs with no dependencies are ready to run
if isempty(dep_list)
push!(ready_jobs, job_id)
end
jobs[job_id] = job
# Go get grandchild dependency information
for dep in dep_list
dep_id = hash(dep)
dependency = (job_id, dep_id, false)
relation_occurances = get(dep_relations, dependency, 0) + 1
if relation_occurances > 25
check_for_circular_dependencies(jobs, keys(dep_relations))
end
dep_relations[(job_id, dep_id, false)] = relation_occurances
traverse_dependencies!(dep, jobs, dep_relations, ready_jobs, depth + 1)
end
return nothing
end
get_dependencies_list(deps::AbstractDict{Symbol, <:AbstractJob}) = collect(values(deps))
get_dependencies_list(::Nothing) = AbstractJob[]
get_dependencies_list(deps) = deps
function check_for_circular_dependencies(jobs, dependency_relations)
# SimpleDiGraph only uses OneTo Ints as IDs so we need a map back to job ids
job_id_to_idx = Dict(
job_id => i
for (i, job_id) in enumerate(keys(jobs))
)
g = SimpleDiGraph(length(keys(jobs)))
for (job_id, dep_id, _) in dependency_relations
job_idx = job_id_to_idx[job_id]
dep_idx = job_id_to_idx[dep_id]
add_edge!(g, job_idx, dep_idx)
end
cycles = Graphs.simplecycles_iter(g)
# Build a clean looking printout for the dependency cycle error
if length(cycles) >= 1
cycle_explanation = Vector{String}(undef, length(cycles) + sum(length.(cycles)))
explain_idx = 1
for (i, cycle) in enumerate(cycles)
cycle_explanation[explain_idx] = "Cycle Number $i\n"
explain_idx += 1
for ci in eachindex(cycle)
job_idx = cycle[ci]
dep_idx = cycle[(ci)%length(cycle) + 1]
job_id = findfirst(p -> p[2]==job_idx, (p for p in pairs(job_id_to_idx)))[1]
dep_id = findfirst(p -> p[2]==dep_idx, (p for p in pairs(job_id_to_idx)))[1]
job = jobs[job_id]
dep = jobs[dep_id]
cycle_explanation[explain_idx] = "\tDependency Pair$ci\n\t\t$job\n\t\t$dep\n"
explain_idx += 1
end
end
throw(InvalidStateException("The dependency tree contains cycles. Please resolve.\n" * foldl(*, cycle_explanation), :CyclicalDependency))
end
end
id_to_name(hash_id) = Symbol("__$hash_id")
function (job::AbstractJob)(job_id::UInt64, ignore_target, dependency_results...)
@debug "Running spawned execution for job ID $job_id. Details: $job"
target = get_target(job)
if iscomplete(target) && !ignore_target
@debug "$job_id was already complete. Retrieving the target"
data = retrieve(target)
return ScheduledJob(job_id, target, data)
end
original_deps = get_dependencies(job)
dependencies = replace_dep_job_with_result(original_deps, dependency_results)
actual_result = run_process(job, dependencies, target)
@debug "Ran dep, target, and process funcs against $job_id. Return type is $(typeof(actual_result))"
data = if target isa NoTarget
actual_result
else
@debug "Storing result for $job_id"
store(target, actual_result)
retrieve(target)
end
@debug "$job_id is complete."
return ScheduledJob(job_id, target, data)
end
function replace_dep_job_with_result(dep_jobs::AbstractArray, dep_results)
return ScheduledJob[
dep_results[findfirst(res -> res.job_id == hash(job), dep_results) ]
for job in dep_jobs]
end
function replace_dep_job_with_result(dep_jobs::AbstractDict, dep_results)
return Dict{Symbol,ScheduledJob}(
k => dep_results[findfirst(res -> res.job_id == hash(job), dep_results) ]
for (k,job) in pairs(dep_jobs))
end
| Waluigi | https://github.com/mrufsvold/Waluigi.jl.git |
|
[
"MIT"
] | 0.2.0 | 71bc1e3ee8d345220ddaf300641340c24e3bf5d1 | code | 1377 | using Serialization
"""
A target is a persisted result or side effect of a process; targets are the things we're trying to make with a pipeline.
The interface for a target has the following functions:
iscomplete(::AbstractTarget) Returns a bool indicating whether the target has already been created (and the process can be skipped)
store(::AbstractTarget, data) Stores the data produced by a job in the target
retrieve(::AbstractTarget) Returns the data held by a completed target
"""
abstract type AbstractTarget{T} end
return_type(::AbstractTarget{T}) where {T} = T
return_type(::Type{<:AbstractTarget{T}}) where {T} = T
struct NoTarget{T} <: AbstractTarget{T} end
NoTarget() = NoTarget{Any}()
Base.convert(::Type{AbstractTarget}, ::Nothing) = NoTarget{Any}()
iscomplete(::NoTarget) = false
"""BinFileTarget(path)
A target that serializes the result of a Job and stores it in a `.bin` file at the designated path; the type parameter `T` declares the stored value's type (e.g. `BinFileTarget{Int}(path)`).
"""
struct BinFileTarget{T} <: AbstractTarget{T}
path::String
function BinFileTarget{T}(path) where {T}
path = endswith(path, ".bin") ? path : path * ".bin"
return new{T}(path)
end
end
iscomplete(t::BinFileTarget) = isfile(t.path)
function store(t::BinFileTarget, data)
open(t.path, "w") do io
serialize(io, data)
end
end
function retrieve(t::BinFileTarget{T}) where {T}
open(t.path, "r") do io
deserialize(io)::T
end
end
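# A minimal usage sketch (the path below is illustrative, not part of the package):
# t = BinFileTarget{Vector{Float64}}("cache/result")   # ".bin" is appended automatically
# iscomplete(t) || store(t, rand(10))
# data = retrieve(t)                                   # returns a Vector{Float64}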
| Waluigi | https://github.com/mrufsvold/Waluigi.jl.git |
|
[
"MIT"
] | 0.2.0 | 71bc1e3ee8d345220ddaf300641340c24e3bf5d1 | code | 228 | module Waluigi
using Dagger
using Graphs
export @Job, get_dependencies, get_target, get_result, execute, run_process
include("Target.jl")
include("Job.jl")
include("DaggerViz.jl")
include("Pipeline.jl")
end # module Waluigi
| Waluigi | https://github.com/mrufsvold/Waluigi.jl.git |
|
[
"MIT"
] | 0.2.0 | 71bc1e3ee8d345220ddaf300641340c24e3bf5d1 | code | 1066 | using Parquet2
using Tables
struct ParquetDirTarget <: Waluigi.AbstractTarget{Parquet2.Dataset}
path::String
write_kwargs
read_kwargs
end
ParquetDirTarget(path; write_kwargs=(), read_kwargs=()) = ParquetDirTarget(path, write_kwargs, read_kwargs)
Waluigi.iscomplete(t::ParquetDirTarget) = isdir(t.path)
function Waluigi.store(t::ParquetDirTarget, data)
isdir(t.path) && rm(t.path; force=true, recursive=true)
mkdir(t.path)
if Tables.istable(data)
store_one(data, joinpath(t.path, "1.parq"), t.write_kwargs...)
else
store_many(data, t.path, t.write_kwargs...)
end
return nothing
end
function store_one(data, path, kwargs...)
Parquet2.writefile(
path, data;
kwargs...
)
end
function store_many(chunks, path, kwargs...)
for (i, chunk) in enumerate(chunks)
Parquet2.writefile(
joinpath(path, "$i.parq"), chunk;
kwargs...
)
end
end
function Waluigi.retrieve(t::ParquetDirTarget)
return Parquet2.Dataset(t.path; t.read_kwargs...)
end
| Waluigi | https://github.com/mrufsvold/Waluigi.jl.git |
|
[
"MIT"
] | 0.2.0 | 71bc1e3ee8d345220ddaf300641340c24e3bf5d1 | code | 3865 | using Logging
global_logger(ConsoleLogger(stderr, Logging.Error))
using Scratch
function __init__()
global test_files = get_scratch!(@__MODULE__, "test_files")
end
__init__()
using Test
using DataFrames
using Dagger
using Waluigi
# Putting all the structs for tester jobs in a module makes it easier to iterate
include("./custom_target.jl")
include("./test_jobs.jl")
# My hacky version of checking if struct results are the same
field_equal(v1, v2) = (v1==v2) isa Bool ? v1==v2 : false
field_equal(::Nothing, ::Nothing) = true
field_equal(::Missing, ::Missing) = true
field_equal(a1::AbstractArray, a2::AbstractArray) = length(a1) == length(a2) && field_equal.(a1,a2) |> all
function fields_equal(o1, o2)
for name in fieldnames(typeof(o1))
prop1 = getproperty(o1, name)
prop2 = getproperty(o2, name)
if !field_equal(prop1, prop2)
println("Didn't match on $name. Got $prop1 and $prop2")
return false
end
end
return true
end
@testset "All nothing jobs description" begin
for job in (TestJobs.NothingJob(), TestJobs.OopsAllOmited())
@test get_dependencies(job) == []
@test get_target(job) isa Waluigi.NoTarget
@test run_process(job, [nothing], nothing) isa Nothing
@test fields_equal(
get_result(Waluigi.run_pipeline(job)),
Waluigi.ScheduledJob(zero(UInt64), Waluigi.NoTarget(), nothing))
end
end
@testset "Basic dependencies" begin
@test begin
result = Waluigi.run_pipeline(TestJobs.MainJob())
Waluigi.get_result(result) == 7
end
end
@testset "Malformed Jobs" begin
@test_throws ArgumentError get_result(Waluigi.run_pipeline(TestJobs.BadDeps()))
@test_throws Dagger.ThunkFailedException get_result(Waluigi.run_pipeline(TestJobs.BadTarget()))
@test_throws ArgumentError @Job begin parameters = nothing; process = 5 end
@test_throws InvalidStateException Waluigi.run_pipeline(TestJobs.CycleDepA())
end
@testset "Checkpointing" begin
checkpoint_fp = joinpath(test_files, "checkpoint_tester.bin")
# CheckPointTester just caches the value it's given and returns it.
first_checkpoint_tester = TestJobs.CheckPointTester(1)
first_checkpoint_res = get_result(Waluigi.run_pipeline(first_checkpoint_tester))
@test isfile(checkpoint_fp)
@test first_checkpoint_res == 1
# But since the path to the target is the same for all instances, this new version of CheckPointTester will
# still return `1` since it's just going to grab the cached result regardless of the input
second_checkpoint_res = TestJobs.CheckPointTester(2)
@test 1 == second_checkpoint_res |> Waluigi.run_pipeline |> get_result
@test 2 == Waluigi.run_pipeline(second_checkpoint_res, true) |> get_result
rm(checkpoint_fp)
# Checkpoint with custom target, same strategy as above
test_parq_dir = joinpath(test_files, "test_parq_dir")
parq_file = joinpath(test_parq_dir, "1.parq")
rm(test_parq_dir; force=true, recursive=true)
@test !isdir(test_parq_dir)
df_1 = DataFrame(a=[1,2,3], b=["a","b","c"])
use_custom_1 = TestJobs.UsingCustomTarget(df_1, test_parq_dir)
@test df_1 == (Waluigi.run_pipeline(use_custom_1) |> get_result |> DataFrame)
@test isfile(parq_file)
df_2 = DataFrame(e=[1,1,1])
use_custom_2 = TestJobs.UsingCustomTarget(df_2, test_parq_dir)
@test df_1 == (Waluigi.run_pipeline(use_custom_2) |> get_result |> DataFrame)
rm(test_parq_dir; force=true, recursive=true)
end
@testset "Typing Parameters" begin
@test TestJobs.TypedParams(1,"a").a == 1
@test_throws MethodError TestJobs.TypedParams(1,5)
end
@testset "Use a custom type in a function" begin
@test begin
i_use_tester_type = TestJobs.IUseTesterType(TestJobs.TesterType())
Waluigi.run_pipeline(i_use_tester_type) |> get_result
end
end
| Waluigi | https://github.com/mrufsvold/Waluigi.jl.git |
|
[
"MIT"
] | 0.2.0 | 71bc1e3ee8d345220ddaf300641340c24e3bf5d1 | code | 2137 | module TestJobs
import ..Waluigi
Waluigi.@Job begin
name = NothingJob
parameters = nothing
dependencies = nothing
target = nothing
process = nothing
end
Waluigi.@Job begin
name = OopsAllOmited
end
Waluigi.@Job begin
name = DepJob
parameters = (a,b)
dependencies = nothing
target = nothing
process = begin
return a + b
end
end
Waluigi.@Job begin
name = MainJob
parameters = nothing
dependencies = [DepJob(2,4)]
target = nothing
process = begin
sum_dep = Waluigi.get_result(dependencies[1])
return sum_dep + 1
end
end
Waluigi.@Job begin
name = BadDeps
parameters = nothing
dependencies = (a = DepJob(2,4),)
target = nothing
process = nothing
end
Waluigi.@Job begin
name = BadTarget
parameters = nothing
dependencies = nothing
target = 42
process = nothing
end
Waluigi.@Job begin
name = CheckPointTester
parameters = (a,)
dependencies = nothing
target = Waluigi.BinFileTarget{typeof(a)}(joinpath(Main.test_files, "checkpoint_tester.bin"))
process = a
end
Waluigi.@Job begin
name = UsingCustomTarget
parameters = (tbl, parq_dir)
dependencies = nothing
target = Main.ParquetDirTarget(parq_dir; read_kwargs = (use_mmap=false,))
process = tbl
end
Waluigi.@Job begin
name = TypedParams
parameters = (a::Int, b::String)
dependencies = nothing
target = nothing
process = nothing
end
Waluigi.@Job begin
name = UsingTypedParams
parameters = (a::Int, b::String)
dependencies = nothing
target = Waluigi.BinFileTarget{Int}(joinpath(Main.test_files, "typed_checkpoint.bin"))
process = begin
s = a + 100
return s
end
end
Waluigi.@Job begin
name = CycleDepA
dependencies = CycleDepB()
end
Waluigi.@Job begin
name = CycleDepB
dependencies = CycleDepA()
end
Waluigi.@Job begin
name = ReturnDepTypeNotInstance
dependencies = DepJob
end
struct TesterType end
Waluigi.@Job begin
name = IUseTesterType
parameters = (a::TesterType,)
process = true
end
end # TestJobs Module
| Waluigi | https://github.com/mrufsvold/Waluigi.jl.git |
|
[
"MIT"
] | 0.2.0 | 71bc1e3ee8d345220ddaf300641340c24e3bf5d1 | docs | 3504 | # Waluigi.jl
Waluigi is a pure-Julia implementation of Luigi. It aims to provide a simple interface for defining
`Job`s in a data pipeline and linking dependencies together.
# Getting Started
You can get started with this quick example:
```julia
using Waluigi
module JobDefinitions
@Job begin
name = GetGreeting
process = begin
return "Hello"
end
end
@Job begin
name = GetAddressee
parameters = (name::String,)
process = name
end
@Job begin
name = SayHelloWorld
dependencies = Dict(
:Greeting => GetGreeting(),
:Addressee => GetAddressee("World")
)
process = begin
addressee = get_result(dependencies[:Addressee])
greeting = get_result(dependencies[:Greeting])
println("$greeting, $(addressee)!")
end
end
end # end module
JobDefinitions.SayHelloWorld() |> Waluigi.run_pipeline |> get_result
```
We'll look at this piece by piece.
The module `JobDefinitions` is not necessary for creating jobs, but it is convenient for debugging
and iterating because it allows you to redefine the Job structs without restarting your Julia
session.
`GetGreeting` and `GetAddressee` are both dependencies of `SayHelloWorld`. Looking at `GetAddressee`
we can see that it has a parameter of type `String` which it returns without change in the
`process` function. Also, notice that `name` is immediately available to all fields
below it.
`SayHelloWorld` is the "end of the pipeline". It tells `Waluigi` to run its dependencies.
Each dependency returns a 'ScheduledJob' which provides information about the run of the job.
Here, all we need is the result, so we call `get_result`.
Of course, Hello World is the canonical trivial example, but with these building blocks, you
can define complex dependencies and parameterize abstracted processes to reduce code reuse.
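For instance, dependencies can be built programmatically from a job's parameters. A sketch (the job names here are illustrative, not part of the package):
```julia
@Job begin
    name = SummarizeMonths
    parameters = (n_months::Int,)
    dependencies = [ProcessMonth(m) for m in 1:n_months]
    process = sum(get_result.(dependencies))
end
```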
# Storing Results in a Target
Any step in the pipeline can be saved to a `Target`. A target can be a file on disk, a SQL
table, or anything else that can store and return data. Just like `Job`s, targets can be
defined by a user by implementing a small set of interface functions.
The required interface for an AbstractTarget is:
```julia
# Use T if you want to parameterize your target's return type. Otherwise, replace
# T with a specific type. This helps with type inference between Jobs, so you should use a
# type whenever possible
struct MyTarget{T} <: Waluigi.AbstractTarget{T}
# add fields here
end
# iscomplete returns a boolean indicating whether the process should be skipped because the
# target is completed.
function Waluigi.iscomplete(t::MyTarget)
true
end
# store accepts the target of a job and the data returned by `process` and stores it in the target
function Waluigi.store(t::MyTarget, data)
# logic
end
# Given a completed target, returns the retrieved data
function Waluigi.retrieve(t::MyTarget)
# get data
return data
end
```
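To attach a target to a job, construct it in the job's `target` field. A sketch using the built-in `BinFileTarget` (the path is illustrative):
```julia
@Job begin
    name = CachedComputation
    parameters = (a::Int,)
    target = Waluigi.BinFileTarget{Int}("cache/cached_computation")
    process = a + 100
end
```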
The current implementation of the pipeline always stores the result of the job and then runs
retrieve, passing on only the retrieved data. This prevents a situation where the store and
retrieve functions are not perfect inverses of each other. It does mean that some unnecessary
computation is performed. In the future, there may be a new AbstractTarget type that tells the
pipeline to store and then return the actual result of process directly, with the user taking responsibility
for ensuring that the retrieved data is consistent with the result data.
| Waluigi | https://github.com/mrufsvold/Waluigi.jl.git |
|
[
"MIT"
] | 0.2.0 | 71bc1e3ee8d345220ddaf300641340c24e3bf5d1 | docs | 1626 | # Vision
Waluigi should provide a seamless interface for constructing data pipelines. Users just need
to define a job that has three parts:
* a list of jobs on which the current job depends
* a target which holds the resulting data; it can be a local file, a database table, etc.
* a process function that provides the steps for creating the data
The workflow should be:
define a list of jobs, call `run()` on the final job, and Waluigi will then
spawn all the dependent jobs required. If a job is already done, then it will read the target
and return that data. `run()` will return a Result object which contains references to the
results of the dependencies, the target, and the data created by the job.
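In the current code the entry point is `Waluigi.run_pipeline` and results are read with `get_result`; a sketch reusing `SayHelloWorld` from the README example:
```julia
final_job = SayHelloWorld()               # the last job in the pipeline
result = Waluigi.run_pipeline(final_job)  # satisfies all dependencies via Dagger
get_result(result)                        # the data produced by the job
```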
We will lean on Dagger.jl for the backend infrastructure, which schedules jobs, maintains the
graph of dependencies, and provides visualizations of the processes.
# TODO
* Investigate using Dagger Checkpoints
```julia
x = Dagger.delayed(sum;
options=Dagger.Sch.ThunkOptions(;
checkpoint=(thunk,result)->begin
open("checkpoint.bin", "w") do io
serialize(io, collect(result))
end
end,
restore=(thunk)->begin
dump(thunk)
open("checkpoint.bin", "r") do io
Dagger.tochunk(deserialize(io))
end
end)
)([5,6])
```
* Consider using get_dependencies to improve type inference on NoTarget{Any}
* Docs for custom targets
| Waluigi | https://github.com/mrufsvold/Waluigi.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 2784 |
using SpecialFunctions, BesselK, ForwardDiff, StaticArrays
import ForwardDiff: derivative
# Convenience tool that will remove allocs:
@inline pv(scale, range, v) = @SVector [scale, range, v]
# Raw Bessel functions. Note also that there is the `adbesselkxv` method in
# BesselK.jl that gives you (x^v)*besselk(v,x) directly, sometimes with at least
# a slight gain to accuracy and speed. That isn't bound here, but obviously you
# could just slightly tweak this version.
R_besselk(v, x) = BesselK.adbesselk(v, x)
R_besselk_dx(v, x) = derivative(_x->R_besselk(v, _x), x)
R_besselk_dv(v, x) = derivative(_v->R_besselk(_v, x), v)
R_besselk_dx_dx(v, x) = derivative(_x->R_besselk_dx(v, _x), x)
R_besselk_dx_dv(v, x) = derivative(_x->R_besselk_dv(v, _x), x)
R_besselk_dv_dv(v, x) = derivative(_v->R_besselk_dv(_v, x), v)
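# A sketch of the analogous binding for the scaled function mentioned above (not part of the
# original script); its derivatives could be wrapped exactly like the R_besselk ones:
R_besselkxv(v, x) = BesselK.adbesselkxv(v, x)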
# Unlike the Julia-native version, this takes a pre-computed distance instead of
# two coordinates x and y, because if we have to take them as straight
# Vector{Float64} items then that will make allocations in the kernel calls and
# totally kill performance. In Julia this is avoided by using StaticArrays, but
# I don't think I can ask R users to deal with that interface. If you care
# enough about performance and need the additional flexibility that that
# provides, you could extend this code...or just switch to Julia more fully.
#
# Note here that the parameter order is:
# (sigma (scale), rho (range), nu (smoothness))
#
# You can of course change all of this up, but then be careful to also update
# the book-keeping stuff in the derivative functions.
#
# TODO (cg 2022/01/02 17:54): what is the most common signature for Julia stuff
# here? As it stands, the parameters can be passed in as a tuple, but should the
# R version take all parameters as scalars to again avoid using straight
# Vector{Float64}?
function matern(dist, params)
(sg, rho, nu) = params
iszero(dist) && return sg*sg
arg = sqrt(2*nu)*dist/rho
(sg*sg*(2^(1-nu))/gamma(nu))*BesselK.adbesselkxv(nu, arg)
end
# First derivatives:
matern_d1(dist, p) = matern(dist, (sqrt(2*p[1]), p[2], p[3]))
matern_d2(dist, p) = derivative(_p->matern(dist,pv(p[1], _p, p[3])), p[2])
matern_d3(dist, p) = derivative(_p->matern(dist,pv(p[1], p[2], _p)), p[3])
# Second derivatives:
matern_d1_d1(dist, p) = derivative(_p->matern_d1(dist, pv(_p, p[2], p[3])), p[1])
matern_d1_d2(dist, p) = derivative(_p->matern_d1(dist, pv(p[1], _p, p[3])), p[2])
matern_d1_d3(dist, p) = derivative(_p->matern_d1(dist, pv(p[1], p[2], _p)), p[3])
matern_d2_d2(dist, p) = derivative(_p->matern_d2(dist, pv(p[1], _p, p[3])), p[2])
matern_d2_d3(dist, p) = derivative(_p->matern_d2(dist, pv(p[1], p[2], _p)), p[3])
matern_d3_d3(dist, p) = derivative(_p->matern_d3(dist, pv(p[1], p[2], _p)), p[3])
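# A minimal usage sketch with illustrative values (not part of the original script):
# p0 = pv(1.0, 1.0, 1.25)   # (scale, range, smoothness)
# matern(0.5, p0)           # Matern covariance at distance 0.5
# matern_d3(0.5, p0)        # derivative with respect to the smoothness nu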
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 300 |
using StaticArrays, BesselK, ForwardDiff
# As simple as this:
const x_point = @SVector rand(2)
const y_point = @SVector rand(2)
const params = [1.0, 1.0, 1.0] # scale, range, smoothness.
# All derivatives, including the smoothness!
ForwardDiff.gradient(p->matern(x_point, y_point, p), params)
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 545 |
# Conclusion: for e-fish, pretty similar.
function efish_replicates(dfns, parms, pts, lendatav)
fish = zeros(3,3)
_S = Symmetric([matern(x, y, parms) for x in pts, y in pts])
S = cholesky!(_S)
for j in 1:3
dfj = dfns[j]
Sj = Symmetric([dfj(x, y, parms) for x in pts, y in pts])
fish[j,j] = tr(S\(Sj*(S\Sj)))/2
for k in (j+1):3
dfk = dfns[k]
Sk = Symmetric([dfk(x, y, parms) for x in pts, y in pts])
fish[j,k] = tr(S\(Sj*(S\Sk)))/2
fish[k,j] = fish[j,k]
end
end
fish*lendatav
end
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 2260 |
# As is discussed in the paper, you can see that the FD derivatives here are so
# inaccurate that Ipopt can't even converge. The FD Hessians are worse than
# garbage and completely unusable without very high-order and adaptive methods,
# which are so brutal to performance that they aren't even worth considering.
using Ipopt, Serialization
include("shared.jl")
include("gradient.jl")
include("ipopt_helpers.jl")
function fitter(objects_initval)
((case, gradfun, hesfun, maxiter), ini) = objects_initval
is_bfgs = in(case, (:FD_BFGS, :AD_BFGS))
println("Case: $case")
cache = Vector{Float64}[]
box_l = is_bfgs ? [1e-2, 1e-2, 0.25] : [0.0, 0.0, 0.25]
prob = createProblem(3, box_l, fill(1e22, (3,)), 0,
Float64[], Float64[], 0, div(3*4, 2),
_p->caching_nll(_p, cache),
(args...)->nothing,
gradfun,
(args...)->nothing,
(x,m,r,c,o,l,v)->ipopt_hessian(x,m,r,c,o,l,v,
hesfun,Function[],0))
addOption(prob, "tol", 1e-5)
addOption(prob, "max_iter", maxiter)
if is_bfgs
addOption(prob, "hessian_approximation", "limited-memory")
addOption(prob, "nlp_scaling_method", "none")
end
prob.x = deepcopy(ini) # for safety to avoid weird persistent pointer games.
try
@time status = solveProblem(prob)
_h = is_bfgs ? fill(NaN, 3, 3) : hesfun(prob.x)
return (prob, status, case, deepcopy(prob.x), _nll(prob.x), _h, cache, ini)
catch er
println("\n\nOptimization failed with error $er\n\n")
return (prob, :FAIL_OPT_ERR, case, deepcopy(prob.x), NaN, fill(NaN, 3, 3), cache, ini)
end
end
const cases = ((:FD_FISH, grad_fd!, fishfd, 100),
(:AD_FISH, grad_ad!, fishad, 100),
(:FD_HESS, grad_fd!, hessfd, 100),
(:AD_HESS, grad_ad!, _nllh, 100),
(:FD_BFGS, grad_fd!, no_hessian, 100),
(:AD_BFGS, grad_ad!, no_hessian, 100))
const inits = (ones(3), [1.0, 0.1, 2.0])
const test_settings = vec(collect(Iterators.product(cases, inits)))
if !isinteractive()
const res = map(fitter, test_settings)
serialize("fit_results.serialized", res)
end
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 504 |
function gradient_replicates!(gstore, dfns, parms, pts, datav)
fill!(gstore, zero(eltype(gstore)))
@assert length(gstore) == length(dfns) "Length of gstore and number of derivative functions don't match."
_S = Symmetric([matern(x, y, parms) for x in pts, y in pts])
S = cholesky(_S)
for j in eachindex(gstore)
dfj = dfns[j]
Sj = Symmetric([dfj(x, y, parms) for x in pts, y in pts])
gstore[j] = (tr(S\Sj)*length(datav) - sum(z->dot(z, S\(Sj*(S\z))), datav))/2
end
gstore
end
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 992 |
function hessian_replicates(dfns, d2fns, parms, pts, datav)
hess = zeros(3,3)
_S = Symmetric([matern(x, y, parms) for x in pts, y in pts])
S = cholesky(_S)
for j in 1:3
dfj = dfns[j]
Sj = Symmetric([dfj(x, y, parms) for x in pts, y in pts])
for k in j:3
# first derivative matrix:
dfk = dfns[k]
Sk = Symmetric([dfk(x, y, parms) for x in pts, y in pts])
# second derivative matrix:
dfjk = d2fns[j][k-j+1]
Sjk = Symmetric([dfjk(x, y, parms) for x in pts, y in pts])
# Compute the complicated derivative of the qform (not efficient or
# thoughtful, so don't use this code for real somewhere):
dqf = -(S\(Sk*(S\(Sj*inv(_S)))) )
dqf += S\(Sjk*inv(_S))
dqf -= S\(Sj*(S\(Sk*inv(_S))))
# compute the term:
term = (tr(S\Sjk) - tr(S\(Sj*(S\Sk))))*length(datav)/2
term -= sum(z->dot(z, dqf, z), datav)/2
hess[j,k] = term
hess[k,j] = term
end
end
hess
end
| BesselK | https://github.com/cgeoga/BesselK.jl.git |
|
[
"MIT"
] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 1129 |
hes_reshape(h, len) = [h[i,j] for i=1:len for j=1:i]
function hes_structure!(rows, cols, len)
idx = 1
for row in 1:len
for col in 1:row
rows[idx] = row
cols[idx] = col
idx += 1
end
end
nothing
end
function ipopt_hessian(xarg, mode, rows, cols, obj_factor, lams, values,
hessfn, constr_hessv, nconstr)
(mode == :Structure) && return hes_structure!(rows, cols, length(xarg))
@assert length(lams) == nconstr "Disagreement in lengths of lambdas and constraint functions."
h = hessfn(xarg)
values .= hes_reshape(h, length(xarg)).*obj_factor
for (lj, hesconstrj) in zip(lams, constr_hessv)
constrj_hes = hesconstrj(xarg)
values .+= lj*hes_reshape(constrj_hes, length(xarg))
end
end
function jac_structure!(rows, cols, len, nconstr)
for (j, ix) in enumerate(Iterators.partition(1:(len*nconstr), len))
rows[ix].=j
cols[ix].=collect(1:len)
end
nothing
end
function ipopt_constr_jac(xarg, mode, rows, cols, values, g_jac, nconstr)
(mode == :Structure) && return jac_structure!(rows, cols, length(xarg), nconstr)
values .= g_jac(xarg)
end
| BesselK | https://github.com/cgeoga/BesselK.jl.git | ["MIT"] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 3358 |
# Setting: imagine that we have a bunch of iid replicates of very correlated
# smooth data. Let's fit it and look at the resulting CIs.
using LinearAlgebra, StableRNGs, FiniteDifferences # FiniteDifferences is used for the reference Hessian below.
include("../../examples/matern.jl")
include("gradient.jl")
include("efish.jl")
include("hessian.jl")
# A likelihood function, slightly specialized for speed:
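# Up to an additive constant this is (nrep/2)*logdet(K) + (1/2)*sum_z |L \ z|^2,
# where K = L*L' is the Cholesky factorization, so the factorization and the
# log-determinant are computed once and shared across all replicates.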
function nll_replicates(parms, pts, datav)
K = Symmetric([matern(x, y, parms) for x in pts, y in pts])
Kf = cholesky!(K) # in-place
out = logdet(Kf)*length(datav)/2 # only compute logdet once.
out += sum(z->sum(abs2, Kf.L\z), datav)/2 # all the solve terms.
out
end
# For plotting a likelihood surface. This uses the "profile likelihood", which
# takes advantage of the fact that, given all other parameters, the
# likelihood-minimizing variance can be expressed in closed form as
#
# sig_implied = dot(datav, K(sigma=1, other_parms...)\datav)/length(datav[1]).
#
# Since the full likelihood can be written with sigma pulled out of the matrix,
# you can plug this expression back into the Gaussian likelihood and still
# optimize the remaining parameters jointly, so you end up with a (k-1)-dimensional
# problem instead of a k-dimensional one.
#
# See, for example, the definition at the top of the second column on page 5 of
# the Geoga et al. JCGS citation, although that is far from the first use of the
# idea, which dates back to at least the 1990s.
function profile_nll_replicates(parms, pts, datav)
n = length(first(datav))
_p = @SVector [one(eltype(parms)), parms[1], parms[2]]
K = Symmetric([matern(x, y, _p) for x in pts, y in pts])
Kf = cholesky!(K) # in-place
out = logdet(Kf)*length(datav)/2 # only compute logdet once.
out += n*sum(z->log(sum(abs2, Kf.L\z)), datav)/2 # all the solve terms.
out
end
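# A hypothetical one-line helper (the name `implied_sigma2` is not part of the original
# scripts) just to make the implied-variance formula from the comment above concrete:
# with Kf the Cholesky factorization of the sigma=1 covariance and z one replicate,
# |Kf.L \ z|^2 equals z' K^-1 z, so the likelihood-minimizing variance is
implied_sigma2(Kf, z) = sum(abs2, Kf.L\z)/length(z)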
# simulate some data, using the "let" syntax to avoid keeping global K after
# we're done:
const TRU_P = @SVector [1.5, 2.5, 1.3]
const SEED = StableRNG(12345)
const N_REP = 10
const N_DAT = 512
const PTS = [SVector{2,Float64}(rand(SEED, 2)...) for _ in 1:N_DAT]
const SIMS = let K = Symmetric([matern(x, y, TRU_P) for x in PTS, y in PTS])
Kf = cholesky(K)
[Kf.L*randn(SEED, size(Kf, 2)) for _ in 1:N_REP]
end
# A second simulation of a single draw at more points for the likelihood
# surface:
const N_DAT_SURF = 1600
const TRU_P_SURF = @SVector [1.5, 0.5, 1.3]
const PTS_SURF = [SVector{2,Float64}(rand(SEED, 2)...) for _ in 1:N_DAT_SURF]
const SIM_SURF = let K = Symmetric([matern(x, y, TRU_P_SURF) for x in PTS_SURF, y in PTS_SURF])
Kf = cholesky(K)
Kf.L*randn(SEED, size(Kf, 2))
end
_nll(p) = nll_replicates(p, PTS, SIMS)
_nllh(p) = ForwardDiff.hessian(_nll, p)
function caching_nll(p, cache=nothing)
if !isnothing(cache)
push!(cache, deepcopy(p))
end
_nll(p)
end
const HIGHFD = central_fdm(10,1)
function high_fd_hessian(p)
gfun = _p -> FiniteDifferences.grad(HIGHFD, _nll, _p)
FiniteDifferences.jacobian(HIGHFD, gfun, p)[1]
end
no_hessian(x) = error("This function should not have been called!")
grad_fd!(p, store) = gradient_replicates!(store, FD_DFNS, p, PTS, SIMS)
grad_ad!(p, store) = ForwardDiff.gradient!(store, _nll, p)
hessfd(p) = hessian_replicates(FD_DFNS, FD_D2FNS, p, PTS, SIMS)
fishfd(p) = efish_replicates(FD_DFNS, p, PTS, length(SIMS))
fishad(p) = efish_replicates(AD_DFNS, p, PTS, length(SIMS))
| BesselK | https://github.com/cgeoga/BesselK.jl.git | ["MIT"] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 1811 |
# Bring a few packages into scope:
using BesselK, ForwardDiff, StaticArrays
import BesselK: gamma, adbesselkxv
# Matern covariance function, using the rescaled (x^v)*besselk(v, x):
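# Written out, this is sg^2 * 2^(1-nu)/gamma(nu) * arg^nu * besselk(nu, arg) with
# arg = sqrt(2*nu)*dist/rho; since adbesselkxv(nu, x) returns x^nu * besselk(nu, x),
# the arg^nu factor is folded into the Bessel call, which tends to 2^(nu-1)*gamma(nu)
# as dist -> 0 and so is consistent with the sg*sg value returned for zero distance below.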
function matern(x, y, params)
(sg, rho, nu) = params
dist = norm(x-y)
iszero(dist) && return sg*sg
arg = sqrt(2*nu)*dist/rho
(sg*sg*(2^(1-nu))/gamma(nu))*adbesselkxv(nu, arg)
end
# Slightly fancy matrix assembly and factorization:
function assemble_matrix(points, params)
buf = Matrix{eltype(params)}(undef, length(points), length(points))
Threads.@threads for k in eachindex(points) # nice multi-threading
ptk = points[k]
buf[k,k] = matern(ptk, ptk, params)
@inbounds for j in 1:(k-1) # turns off array bounds checking
buf[j,k] = matern(points[j], ptk, params)
end
end
cholesky!(Symmetric(buf)) # in-place Cholesky factorization to avoid heap allocs.
end
# Negative log-likelihood, with a slight trick for the quadratic form to just
# have to solve with the triangular factor once:
function nll(points, data, params)
Sigma = assemble_matrix(points, params)
(logdet(Sigma) + sum(abs2, Sigma.U'\data))/2
end
# Sample locations and data, using stack-allocated arrays via StaticArrays.jl to
# make sure that the autodiff derivatives don't make any heap allocations.
# This is just a random example to demonstrate how to create the single-arg
# closure that you can pass to ForwardDiff.
const LOCS = [@SVector rand(2) for _ in 1:1000]
const DAT = randn(length(LOCS))
# create single-argument closure for the log-likelihood:
objective(p) = nll(LOCS, DAT, p)
# AD-generated gradient and hessian. It's that easy! Plug in to your favorite
# optimizer and you're good to go.
objective_grad(p) = ForwardDiff.gradient(objective, p)
objective_hess(p) = ForwardDiff.hessian(objective, p)
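# For example (illustrative parameter values only, not the result of any fit):
#   p0 = @SVector [1.0, 0.1, 1.0]   # (sigma, rho, nu)
#   objective(p0); objective_grad(p0); objective_hess(p0)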
| BesselK | https://github.com/cgeoga/BesselK.jl.git | ["MIT"] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 785 |
include("shared.jl")
include("../plotting/gnuplot_utils.jl")
BLAS.set_num_threads(1)
# Small neighborhood of the MLE.
const RANGE_GRID = range(0.25, 5.0, length=80)
const SMOOTH_GRID = range(1.2, 1.5, length=70)
function gensurf()
out = zeros(length(RANGE_GRID), length(SMOOTH_GRID))
Threads.@threads for j in eachindex(RANGE_GRID)
@inbounds for k in eachindex(SMOOTH_GRID)
out[j,k] = profile_nll_replicates((RANGE_GRID[j], SMOOTH_GRID[k]),
PTS_SURF, (SIM_SURF,))
end
end
out .- minimum(out)
end
# Will need to figure out the right color scale here.
if !isinteractive()
const surf = gensurf()
gnuplot_save_matrix!("../plotdata/profile_surface.csv", surf,
RANGE_GRID, SMOOTH_GRID)
end
| BesselK | https://github.com/cgeoga/BesselK.jl.git | ["MIT"] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 1285 |
using Ipopt, Printf, Serialization
# You need to run fit.jl before running this script.
include("shared.jl")
const res = deserialize("fit_results.serialized")
function printf_sym_matrix(M::Matrix{Float64})
(s1, s2) = size(M)
for j in 1:(s1-1)
for k in 1:(s2-1)
if k >= j
@printf "%1.2f & " M[j,k]
else
@printf "\\cdot & "
end
end
@printf "%1.2f\\\\ \n" M[j,end]
end
for k in 1:(s2-1)
@printf "\\cdot & "
end
@printf "%1.2f" M[end,end]
end
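# For example, printf_sym_matrix([1.0 2.0; 2.0 4.0]) prints the LaTeX tabular body
#   1.00 & 2.00\\
#   \cdot & 4.00
# (only the upper triangle is printed; entries below the diagonal become \cdot).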
function write_matrix_to_file(fname, M, prefix="../../../../manuscript/tables/")
open(prefix*fname, "w") do io
redirect_stdout(io) do
printf_sym_matrix(M)
end
end
end
#=
# look at hess vs e-fish at initializer:
write_matrix_to_file("fdefish_at_ones.tex", fishfd(ones(3)))
write_matrix_to_file("fdhess_at_ones.tex", hessfd(ones(3)))
write_matrix_to_file("adhess_at_ones.tex", _nllh(ones(3)))
write_matrix_to_file("refhess_at_ones.tex", high_fd_hessian(ones(3)))
# look at hess vs e-fish at MLE:
write_matrix_to_file("fdefish_at_mle.tex", fishfd(res[3][4]))
write_matrix_to_file("fdhess_at_mle.tex", hessfd(res[3][4]))
write_matrix_to_file("adhess_at_mle.tex", _nllh(res[3][4]))
write_matrix_to_file("refhess_at_mle.tex", high_fd_hessian(res[3][4]))
=#
| BesselK | https://github.com/cgeoga/BesselK.jl.git | ["MIT"] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 927 |
using DelimitedFiles # writedlm is used throughout this file
function tablesave!(name, M::Matrix; rlabels=nothing, clabels=nothing, siunits=false)
out = siunits ? map(x->"\\num{"*string(x)*"}", M) : string.(M)
if !isnothing(rlabels)
out = hcat(string.(rlabels), out)
end
if !isnothing(clabels)
is_row_nothing = isnothing(rlabels)
rw1 = string.(clabels)
if !is_row_nothing
pushfirst!(rw1, "")
end
rw1[end] = rw1[end] * "\\\\"
writedlm("header_"*name, reshape(rw1, 1, length(rw1)), '&')
end
out[:,end] .= out[:,end] .* repeat(["\\\\"], size(out,1))
writedlm(name, out, '&')
end
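# For example (hypothetical file name), tablesave!("tab.tex", [1 2; 3 4]; clabels=["a","b"], siunits=true)
# writes "header_tab.tex" containing a&b\\ and "tab.tex" with rows like \num{1}&\num{2}\\.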
function gnuplot_save_matrix!(name, M::Matrix{Float64}, row_pts, col_pts, delim=',')
out = Array{Any}(undef, size(M,1)+1, size(M,2)+1)
out[1,1] = ""
out[1,2:end] .= col_pts
out[2:end,1] .= row_pts
out[2:end, 2:end] .= M
writedlm(name, out, delim)
end
function gnuplot_save_vector!(name, M, pts, delim=',')
writedlm(name, hcat(pts, M), delim)
end
| BesselK | https://github.com/cgeoga/BesselK.jl.git | ["MIT"] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 1539 |
using BesselK, SpecialFunctions, ArbNumerics
include("shared.jl")
function wbesselk(v,x)
try
return BesselK._besselk(v,x)
catch
return NaN
end
end
rbesselk(v,x) = Float64(ArbNumerics.besselk(ArbFloat(v), ArbFloat(x)))
abesselk(v,x) = SpecialFunctions.besselk(v, x)
const BASELINE = [rbesselk(z[1], z[2]) for z in Iterators.product(VGRID, XGRID)]
const AMOS = [abesselk(z[1], z[2]) for z in Iterators.product(VGRID, XGRID)]
const OURSOL = [wbesselk(z[1], z[2]) for z in Iterators.product(VGRID, XGRID)]
const TOLS_A = atolfun.(zip(BASELINE, AMOS))
const TOLS_U = atolfun.(zip(BASELINE, OURSOL))
const TOLS_AU = rtolfun.(zip(AMOS, OURSOL))
const RTOLS_A = rtolfun.(zip(BASELINE, AMOS))
const RTOLS_U = rtolfun.(zip(BASELINE, OURSOL))
@assert iszero(length(findall(isnan, OURSOL))) "There were NaNs in our attempts!"
# quick simple test to find the worst atol when B(x) < 1:
let ix = findall(x->x<=one(x), BASELINE)
res = findmax(abs, TOLS_U[ix])
_ix = res[2]
println("Worst atol when true besselk(v,x) <= 1: $(res[1])")
println("True value: $(BASELINE[ix][_ix])")
end
gnuplot_save_matrix!("../plotdata/atols_amos.csv", TOLS_A, VGRID, XGRID)
gnuplot_save_matrix!("../plotdata/atols_ours.csv", TOLS_U, VGRID, XGRID)
gnuplot_save_matrix!("../plotdata/rtols_amos.csv", RTOLS_A, VGRID, XGRID)
gnuplot_save_matrix!("../plotdata/rtols_ours.csv", RTOLS_U, VGRID, XGRID)
gnuplot_save_matrix!("../plotdata/rtols_amos_ours.csv", TOLS_AU, VGRID, XGRID)
| BesselK | https://github.com/cgeoga/BesselK.jl.git | ["MIT"] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 1100 |
using SpecialFunctions, ForwardDiff, FiniteDifferences
include("shared.jl")
const BIG_FD = central_fdm(10,1)
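# Three ways to get d/dv besselk(v, x): AD through our implementation (dwbesselk),
# a high-order central difference of SpecialFunctions.besselk used as the reference
# (dbesselk), and a crude fixed-step forward difference (fastfdbesselk).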
dwbesselk(v,x) = ForwardDiff.derivative(_v->BesselK._besselk(_v,x,100,1e-12,false), v)
dbesselk(v, x) = BIG_FD(_v->besselk(_v,x), v)
const _h = 1e-6
fastfdbesselk(v, x) = (besselk(v+_h, x) - besselk(v, x))/_h
const BASELINE = [dbesselk(z[1], z[2]) for z in Iterators.product(VGRID, XGRID)]
const OURSOL = [dwbesselk(z[1], z[2]) for z in Iterators.product(VGRID, XGRID)]
const FASTFD = [fastfdbesselk(z[1], z[2]) for z in Iterators.product(VGRID, XGRID)]
const TOLS = atolfun.(zip(BASELINE, OURSOL))
const FDTOLS = atolfun.(zip(BASELINE, FASTFD))
const DIFTOLS = log10.(TOLS) .- log10.(FDTOLS)
let res = findmax(DIFTOLS)
println("Worst case derivative difference: $(res[1])")
println("Value of dfun: $(BASELINE[res[2]])")
end
gnuplot_save_matrix!("../plotdata/atols_deriv_fd.csv", FDTOLS, VGRID, XGRID)
gnuplot_save_matrix!("../plotdata/atols_deriv_ad.csv", TOLS, VGRID, XGRID)
gnuplot_save_matrix!("../plotdata/atols_deriv_fdad.csv", DIFTOLS, VGRID, XGRID)
| BesselK | https://github.com/cgeoga/BesselK.jl.git | ["MIT"] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 1283 |
using SpecialFunctions, ForwardDiff, FiniteDifferences
include("shared.jl")
const BIG_FD = central_fdm(10,1)
const BIG_FD_O2 = central_fdm(10,2)
besselkdv(v,x) = ForwardDiff.derivative(_v->BesselK._besselk(_v,x,100,1e-12,false), v)
besselkdv2(v,x) = ForwardDiff.derivative(_v->besselkdv(_v,x), v)
dbesselkdv(v, x) = BIG_FD(_v->besselk(_v,x), v)
dbesselkdv2(v, x) = BIG_FD_O2(_v->besselk(_v,x), v)
fastdbesselkdv2(v, x) = (besselk(v+2e-6, x) - 2*besselk(v+1e-6, x) + besselk(v, x))/1e-12
const BASELINE = [dbesselkdv2(z[1], z[2]) for z in Iterators.product(VGRID, XGRID)]
const OURSOL = [besselkdv2(z[1], z[2]) for z in Iterators.product(VGRID, XGRID)]
const FASTFD = [fastdbesselkdv2(z[1], z[2]) for z in Iterators.product(VGRID, XGRID)]
const TOLS = atolfun.(zip(BASELINE, OURSOL))
const FDTOLS = atolfun.(zip(BASELINE, FASTFD))
const DIFTOLS = log10.(TOLS) .- log10.(FDTOLS)
let res = findmax(DIFTOLS)
println("Worst case derivative difference: $(res[1])")
println("Value of dfun: $(BASELINE[res[2]])")
end
gnuplot_save_matrix!("../plotdata/atols_deriv2_fd.csv", FDTOLS, VGRID, XGRID)
gnuplot_save_matrix!("../plotdata/atols_deriv2_ad.csv", TOLS, VGRID, XGRID)
gnuplot_save_matrix!("../plotdata/atols_deriv2_fdad.csv", DIFTOLS, VGRID, XGRID)
| BesselK | https://github.com/cgeoga/BesselK.jl.git | ["MIT"] | 0.5.6 | 0a2aba1fa92200ac4ecd1c49b9a73100e4b34816 | code | 1097 |
include("../../examples/matern.jl")
include("../plotting/gnuplot_utils.jl")
const VGRID = range(0.25, 10.0, length=101) # to hit "near-integer" v, which is the hardest.
const XGRID = range(0.0, 30.0, length=201)[2:end]
# First and second derivatives with FD:
fdbesselkdv(v, x) = (besselk(v+1e-6, x) - besselk(v, x))/1e-6
fdbesselkdvdv(v, x) = (besselk(v+2e-6, x) - 2*besselk(v+1e-6, x) + besselk(v, x))/1e-12
# First and second derivatives with AD:
adbesselkdv(v, x) = ForwardDiff.derivative(_v->BesselK._besselk(_v, x), v)
adbesselkdvdv(v, x) = ForwardDiff.derivative(_v->adbesselkdv(_v, x), v)
function assemble_matrix(fn, pts, p)
out = Array{Float64}(undef, length(pts), length(pts))
Threads.@threads for k in 1:length(pts)
out[k,k] = p[1]
@inbounds for j in 1:(k-1)
out[j,k] = fn(pts[j], pts[k], p)
end
end
Symmetric(out)
end
atolfun(tru, est) = isnan(est) ? NaN : (isinf(tru) ? 0.0 : abs(tru-est))
atolfun(tru_est) = atolfun(tru_est[1], tru_est[2])
rtolfun(tru, est) = atolfun(tru, est)/abs(tru)
rtolfun(tru_est) = rtolfun(tru_est[1], tru_est[2])
| BesselK | https://github.com/cgeoga/BesselK.jl.git |