licenses (sequence, lengths 1-3) | version (string, 677 values) | tree_hash (string, length 40) | path (string, 1 value) | type (string, 2 values) | size (string, lengths 2-8) | text (string, lengths 25-67.1M) | package_name (string, lengths 2-41) | repo (string, lengths 33-86) |
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 2.0.0 | d50c73e4abd8f7c58eb76a8884dfd531fa8dac81 | docs | 313 | # HistoricalStdlibVersions.jl
Loads historical stdlib version information into Pkg, allowing Pkg to resolve stdlib versions for prior Julia versions.
## Usage
```julia
julia> import Pkg, HistoricalStdlibVersions
julia> append!(empty!(Pkg.Types.STDLIBS_BY_VERSION), HistoricalStdlibVersions.STDLIBS_BY_VERSION)
```
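After loading, a quick sanity check (a sketch; assumes a Pkg version that defines `Pkg.Types.STDLIBS_BY_VERSION`):
```julia
julia> !isempty(Pkg.Types.STDLIBS_BY_VERSION)
true
```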
| HistoricalStdlibVersions | https://github.com/JuliaPackaging/HistoricalStdlibVersions.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | 025ad27d907e845c3563fd02211d14f4ed2e32f3 | code | 450 | using Documenter, CuFluxSampler
makedocs(
modules = [CuFluxSampler],
clean = false,
format = Documenter.HTML(),
sitename = "CuFluxSampler.jl",
linkcheck = false,
pages = ["README" => "index.md"; "Reference" => "reference.md"],
strict = [:missing_docs, :cross_references],
)
deploydocs(
repo = "github.com/LCSB-BioCore/CuFluxSampler.jl.git",
target = "build",
branch = "gh-pages",
push_preview = false,
)
| CuFluxSampler | https://github.com/LCSB-BioCore/CuFluxSampler.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | 025ad27d907e845c3563fd02211d14f4ed2e32f3 | code | 4776 | module ACHR
using ..CUDA
using ..DocStringExtensions
using SparseArrays
import ..COBREXA
import ..TeaRNG
import Random
"""
$(TYPEDSIGNATURES)
A traditional artificially-centered hit-and-run algorithm that starts with
`start` points.
Refer to the documentation in module AffineHR for the meaning of arguments.
"""
function sample(
m::COBREXA.MetabolicModel,
start::AbstractMatrix;
iters::Int,
bound_stoichiometry::Bool = false,
check_stoichiometry::Bool = true,
direction_noise_max::Union{Nothing,Float32} = nothing,
epsilon::Float32 = 1.0f-5,
seed = Random.rand(UInt32),
)
# allocate base helper variables
npts = size(start, 2)
pts = cu(Matrix{Float32}(start))
dirs = CUDA.zeros(size(start, 1), npts)
lblambdas = CUDA.zeros(size(dirs))
ublambdas = CUDA.zeros(size(dirs))
lmins = CUDA.zeros(size(dirs))
lmaxs = CUDA.zeros(size(dirs))
lmin = CUDA.zeros(1, npts)
lmax = CUDA.zeros(1, npts)
lws = CUDA.zeros(1, npts)
oks = CUDA.zeros(Bool, 1, npts)
# extract model data
S = CUDA.CUSPARSE.CuSparseMatrixCSR(Float32.(COBREXA.stoichiometry(m)))
lbs, ubs = cu.(COBREXA.bounds(m))
if check_stoichiometry || bound_stoichiometry
b = cu(collect(COBREXA.balance(m)))
end
# conditional parts/allocations
if bound_stoichiometry
btmp = CUDA.zeros(length(b), npts)
bdirs = CUDA.zeros(size(btmp))
blblambdas = CUDA.zeros(size(btmp))
bublambdas = CUDA.zeros(size(btmp))
blmins = CUDA.zeros(size(btmp))
blmaxs = CUDA.zeros(size(btmp))
end
bound_coupling = COBREXA.n_coupling_constraints(m) > 0
if bound_coupling
C = CUDA.CUSPARSE.CuSparseMatrixCSR(Float32.(COBREXA.coupling(m)))
clbs, cubs = cu.(COBREXA.coupling_bounds(m))
ctmp = CUDA.zeros(size(C, 1), npts)
cdirs = CUDA.zeros(size(ctmp))
clblambdas = CUDA.zeros(size(ctmp))
cublambdas = CUDA.zeros(size(ctmp))
clmins = CUDA.zeros(size(ctmp))
clmaxs = CUDA.zeros(size(ctmp))
end
add_noise = !isnothing(direction_noise_max)
if add_noise
noise_offset = -direction_noise_max
noise_scale = 2.0f0 * direction_noise_max
end
# swap buffer for pts
newpts = CUDA.zeros(size(pts))
# run the iterations
for iter = 1:iters
dirs .= (sum(pts; dims = 2) ./ npts) .- pts
if add_noise
@cuda threads = 256 blocks = 32 TeaRNG.device_add_unif_rand!(
dirs,
UInt32(seed + UInt32(iter * 2)),
noise_offset,
noise_scale,
)
end
# unit-size directions
dirs ./= sqrt.(sum(dirs .^ 2, dims = 1))
# compute lower and upper bounds on lambdas by variable bounds
lblambdas .= (lbs .- pts) ./ dirs
ublambdas .= (ubs .- pts) ./ dirs
lmins .= min.(lblambdas, ublambdas)
lmaxs .= max.(lblambdas, ublambdas)
lmin .= maximum(ifelse.(isfinite.(lmins), lmins, -Inf32), dims = 1)
lmax .= minimum(ifelse.(isfinite.(lmaxs), lmaxs, Inf32), dims = 1)
if bound_stoichiometry
btmp .= S * pts .- b
bdirs .= S * dirs
blblambdas .= (-epsilon .- btmp) ./ bdirs
bublambdas .= (epsilon .- btmp) ./ bdirs
blmins .= min.(blblambdas, bublambdas)
blmaxs .= max.(blblambdas, bublambdas)
lmin .=
max.(lmin, maximum(ifelse.(isfinite.(blmins), blmins, -Inf32), dims = 1))
lmax .= min.(lmax, minimum(ifelse.(isfinite.(blmaxs), blmaxs, Inf32), dims = 1))
end
if bound_coupling
ctmp .= C * pts
cdirs .= C * dirs
clblambdas .= (clbs .- ctmp .- epsilon) ./ cdirs
cublambdas .= (cubs .- ctmp .+ epsilon) ./ cdirs
clmins .= min.(clblambdas, cublambdas)
clmaxs .= max.(clblambdas, cublambdas)
lmin .=
max.(lmin, maximum(ifelse.(isfinite.(clmins), clmins, -Inf32), dims = 1))
lmax .= min.(lmax, minimum(ifelse.(isfinite.(clmaxs), clmaxs, Inf32), dims = 1))
end
# generate random lambdas and compute new points
@cuda threads = 256 blocks = 32 TeaRNG.device_fill_rand!(
lws,
seed + UInt32(iter * 2 + 1),
)
newpts .= pts + dirs .* (lmin .+ lws .* (lmax .- lmin))
oks .= all((newpts .>= lbs) .& (newpts .<= ubs), dims = 1)
if check_stoichiometry
# check if the new points' balance is within the equality bounds
oks .&= (sum((S * newpts .- b) .^ 2, dims = 1) .< epsilon)
end
pts .= ifelse.(oks, newpts, pts)
end
collect(pts)
end
end # module ACHR
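# Illustrative usage (a sketch; assumes a COBREXA.jl model `m` and a matrix of
# warmup points `start`, one point per column):
#
#   pts = CuFluxSampler.ACHR.sample(m, start; iters = 100, direction_noise_max = 1.0f-5)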
| CuFluxSampler | https://github.com/LCSB-BioCore/CuFluxSampler.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | 025ad27d907e845c3563fd02211d14f4ed2e32f3 | code | 6344 | module AffineHR
using ..CUDA
using ..DocStringExtensions
using SparseArrays
import ..COBREXA
import ..TeaRNG
import Random
function random_mix_matrix(npts, mix_points)
mtx = sparse(
Random.rand(1:npts, npts * mix_points),
repeat(1:npts, inner = mix_points),
Random.rand(npts * mix_points),
npts,
npts,
)
mtx ./ sum(mtx, dims = 1)
end
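# Each column of the resulting mixing matrix is normalized to sum to 1, so
# multiplying a point matrix by it (`pts * mix` in `sample` below) forms random
# affine combinations of the current points, with `mix_points` nonzero weights
# drawn per column.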
function random_permute_matrix(npts)
sparse(1:npts, Random.randperm(npts), 1.0, npts, npts)
end
"""
$(TYPEDSIGNATURES)
Use the affine-combination hit-and-run algorithm to generate a sample of the
feasible area of `m` from the set of `start` points supplied as columns in a
matrix.
The run directions are generated from random affine combinations of
`mix_points` points (3 by default); the matrices `mix_mtx` and `permute_mtx`
give fine control over the mixing process. Preferably, these matrices are very
sparse.
`check_stoichiometry` turns on/off the filtering of generated points based on
whether they are close to the steady state (with tolerance `epsilon`).
`bound_stoichiometry` additionally computes run bounds based on the
steady-state region and uses them to generate better runs. This is useful in
combination with `direction_noise_max`, which may add a small amount of noise
to the generated run directions, allowing the sampler to discover new
directions (potentially not obvious from the warmup in `start`); without the
additional bounds, however, the noise easily makes the runs diverge.
Additional bounds on run ranges are taken from model coupling constraints, if
present.
If you are generating a sample of the optimal model solution, it is expected
that the optimum bound is already present in `m`.
Returns a matrix of the same size as `start`.
"""
function sample(
m::COBREXA.MetabolicModel,
start::AbstractMatrix;
iters::Int,
bound_stoichiometry::Bool = false,
check_stoichiometry::Bool = true,
direction_noise_max::Union{Nothing,Float32} = nothing,
epsilon::Float32 = 1.0f-5,
seed = Random.rand(UInt32),
mix_points = 3,
mix_mtx = random_mix_matrix(size(start, 2), mix_points),
)
# allocate base helper variables
npts = size(start, 2)
pts = cu(Matrix{Float32}(start))
dirs = CUDA.zeros(size(start, 1), npts)
lblambdas = CUDA.zeros(size(dirs))
ublambdas = CUDA.zeros(size(dirs))
lmins = CUDA.zeros(size(dirs))
lmaxs = CUDA.zeros(size(dirs))
lmin = CUDA.zeros(1, npts)
lmax = CUDA.zeros(1, npts)
lws = CUDA.zeros(1, npts)
oks = CUDA.zeros(Bool, 1, npts)
mix = CUDA.CUSPARSE.CuSparseMatrixCSR(Float32.(mix_mtx))
# extract model data
S = CUDA.CUSPARSE.CuSparseMatrixCSR(Float32.(COBREXA.stoichiometry(m)))
lbs, ubs = cu.(COBREXA.bounds(m))
if check_stoichiometry || bound_stoichiometry
b = cu(collect(COBREXA.balance(m)))
end
# conditional parts/allocations
if bound_stoichiometry
btmp = CUDA.zeros(length(b), npts)
bdirs = CUDA.zeros(size(btmp))
blblambdas = CUDA.zeros(size(btmp))
bublambdas = CUDA.zeros(size(btmp))
blmins = CUDA.zeros(size(btmp))
blmaxs = CUDA.zeros(size(btmp))
end
bound_coupling = COBREXA.n_coupling_constraints(m) > 0
if bound_coupling
C = CUDA.CUSPARSE.CuSparseMatrixCSR(Float32.(COBREXA.coupling(m)))
clbs, cubs = cu.(COBREXA.coupling_bounds(m))
ctmp = CUDA.zeros(size(C, 1), npts)
cdirs = CUDA.zeros(size(ctmp))
clblambdas = CUDA.zeros(size(ctmp))
cublambdas = CUDA.zeros(size(ctmp))
clmins = CUDA.zeros(size(ctmp))
clmaxs = CUDA.zeros(size(ctmp))
end
add_noise = !isnothing(direction_noise_max)
if add_noise
noise_offset = -direction_noise_max
noise_scale = 2.0f0 * direction_noise_max
end
# swap buffer for pts
newpts = CUDA.zeros(size(pts))
# run the iterations
for iter = 1:iters
dirs .= (pts * mix) .- pts
if add_noise
@cuda threads = 256 blocks = 32 TeaRNG.device_add_unif_rand!(
dirs,
UInt32(seed + UInt32(iter * 2)),
noise_offset,
noise_scale,
)
end
# unit-size directions
dirs ./= sqrt.(sum(dirs .^ 2, dims = 1))
# compute lower and upper bounds on lambdas by variable bounds
lblambdas .= (lbs .- pts) ./ dirs
ublambdas .= (ubs .- pts) ./ dirs
lmins .= min.(lblambdas, ublambdas)
lmaxs .= max.(lblambdas, ublambdas)
lmin .= maximum(ifelse.(isfinite.(lmins), lmins, -Inf32), dims = 1)
lmax .= minimum(ifelse.(isfinite.(lmaxs), lmaxs, Inf32), dims = 1)
if bound_stoichiometry
btmp .= S * pts .- b
bdirs .= S * dirs
blblambdas .= (-epsilon .- btmp) ./ bdirs
bublambdas .= (epsilon .- btmp) ./ bdirs
blmins .= min.(blblambdas, bublambdas)
blmaxs .= max.(blblambdas, bublambdas)
lmin .=
max.(lmin, maximum(ifelse.(isfinite.(blmins), blmins, -Inf32), dims = 1))
lmax .= min.(lmax, minimum(ifelse.(isfinite.(blmaxs), blmaxs, Inf32), dims = 1))
end
if bound_coupling
ctmp .= C * pts
cdirs .= C * dirs
clblambdas .= (clbs .- ctmp .- epsilon) ./ cdirs
cublambdas .= (cubs .- ctmp .+ epsilon) ./ cdirs
clmins .= min.(clblambdas, cublambdas)
clmaxs .= max.(clblambdas, cublambdas)
lmin .=
max.(lmin, maximum(ifelse.(isfinite.(clmins), clmins, -Inf32), dims = 1))
lmax .= min.(lmax, minimum(ifelse.(isfinite.(clmaxs), clmaxs, Inf32), dims = 1))
end
# generate random lambdas and compute new points
@cuda threads = 256 blocks = 32 TeaRNG.device_fill_rand!(
lws,
seed + UInt32(iter * 2 + 1),
)
newpts .= pts + dirs .* (lmin .+ lws .* (lmax .- lmin))
oks .= all((newpts .>= lbs) .& (newpts .<= ubs), dims = 1)
if check_stoichiometry
# check if the new points' balance is within the equality bounds
oks .&= (sum((S * newpts .- b) .^ 2, dims = 1) .< epsilon)
end
newpts .= ifelse.(oks, newpts, pts)
pts .= newpts * mix
end
collect(pts)
end
end # module AffineHR
| CuFluxSampler | https://github.com/LCSB-BioCore/CuFluxSampler.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | 025ad27d907e845c3563fd02211d14f4ed2e32f3 | code | 232 | """
$(README)
"""
module CuFluxSampler
using DocStringExtensions
import COBREXA
import CUDA
import SparseArrays
include("TeaRNG.jl")
include("FullAffineHR.jl")
include("AffineHR.jl")
include("ACHR.jl")
end # module CuFluxSampler
| CuFluxSampler | https://github.com/LCSB-BioCore/CuFluxSampler.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | 025ad27d907e845c3563fd02211d14f4ed2e32f3 | code | 5538 | module FullAffineHR
using ..CUDA
using ..DocStringExtensions
import ..COBREXA
import ..TeaRNG
import Random
"""
$(TYPEDSIGNATURES)
Use the full-affine-combination hit-and-run algorithm to generate a sample of
the feasible area of `m` from the `warmup` points supplied as columns in a
matrix. If you are generating a sample of the optimal solution, it is expected
that the optimum bound is already present in `m`.
Returns a matrix of `npts` samples organized in columns.
This algorithm is mostly a toy for performance comparisons. It works, but do
not use it in production.
"""
function sample(
m::COBREXA.MetabolicModel,
warmup::AbstractMatrix;
npts::Int = size(warmup, 2),
iters::Int,
bound_stoichiometry::Bool = false,
check_stoichiometry::Bool = true,
direction_noise_max::Union{Nothing,Float32} = nothing,
epsilon::Float32 = 1.0f-5,
seed = Random.rand(UInt32),
)
# TODO seed and tea iters
# allocate base helper variables
base_points = cu(Matrix{Float32}(warmup))
ws = CUDA.zeros(size(base_points, 2), npts)
dirs = CUDA.zeros(size(base_points, 1), npts)
lblambdas = CUDA.zeros(size(dirs))
ublambdas = CUDA.zeros(size(dirs))
lmins = CUDA.zeros(size(dirs))
lmaxs = CUDA.zeros(size(dirs))
lmin = CUDA.zeros(1, npts)
lmax = CUDA.zeros(1, npts)
lws = CUDA.zeros(1, npts)
oks = CUDA.zeros(Bool, 1, npts)
# extract model data
S = CUDA.CUSPARSE.CuSparseMatrixCSR(Float32.(COBREXA.stoichiometry(m)))
lbs, ubs = cu.(COBREXA.bounds(m))
if check_stoichiometry || bound_stoichiometry
b = cu(collect(COBREXA.balance(m)))
end
# conditional parts/allocations
if bound_stoichiometry
btmp = CUDA.zeros(length(b), npts)
bdirs = CUDA.zeros(size(btmp))
blblambdas = CUDA.zeros(size(btmp))
bublambdas = CUDA.zeros(size(btmp))
blmins = CUDA.zeros(size(btmp))
blmaxs = CUDA.zeros(size(btmp))
end
bound_coupling = COBREXA.n_coupling_constraints(m) > 0
if bound_coupling
C = CUDA.CUSPARSE.CuSparseMatrixCSR(Float32.(COBREXA.coupling(m)))
clbs, cubs = cu.(COBREXA.coupling_bounds(m))
ctmp = CUDA.zeros(size(C, 1), npts)
cdirs = CUDA.zeros(size(ctmp))
clblambdas = CUDA.zeros(size(ctmp))
cublambdas = CUDA.zeros(size(ctmp))
clmins = CUDA.zeros(size(ctmp))
clmaxs = CUDA.zeros(size(ctmp))
end
add_noise = !isnothing(direction_noise_max)
if add_noise
noise_offset = -direction_noise_max
noise_scale = 2.0f0 * direction_noise_max
end
# pre-generate first batch of the points
@cuda threads = 256 blocks = 32 TeaRNG.device_fill_rand!(ws, UInt32(0))
pts = (base_points * ws) ./ sum(ws, dims = 1)
# swap buffer for pts
newpts = CUDA.zeros(size(pts))
# run the iterations
for iter = 1:iters
# make random point combinations and convert to directions
@cuda threads = 256 blocks = 32 TeaRNG.device_fill_rand!(
ws,
UInt32(seed + UInt32(iter * 3)),
)
dirs .= ((base_points * ws) ./ sum(ws, dims = 1)) .- pts
if add_noise
@cuda threads = 256 blocks = 32 TeaRNG.device_add_unif_rand!(
dirs,
UInt32(seed + UInt32(iter * 3 + 1)),
noise_offset,
noise_scale,
)
end
# unit-size directions
dirs ./= sqrt.(sum(dirs .^ 2, dims = 1))
# compute lower and upper bounds on lambdas by variable bounds
lblambdas .= (lbs .- pts) ./ dirs
ublambdas .= (ubs .- pts) ./ dirs
lmins .= min.(lblambdas, ublambdas)
lmaxs .= max.(lblambdas, ublambdas)
lmin .= maximum(ifelse.(isfinite.(lmins), lmins, -Inf32), dims = 1)
lmax .= minimum(ifelse.(isfinite.(lmaxs), lmaxs, Inf32), dims = 1)
if bound_stoichiometry
btmp .= S * pts .- b
bdirs .= S * dirs
blblambdas .= (-epsilon .- btmp) ./ bdirs
bublambdas .= (epsilon .- btmp) ./ bdirs
blmins .= min.(blblambdas, bublambdas)
blmaxs .= max.(blblambdas, bublambdas)
lmin .=
max.(lmin, maximum(ifelse.(isfinite.(blmins), blmins, -Inf32), dims = 1))
lmax .= min.(lmax, minimum(ifelse.(isfinite.(blmaxs), blmaxs, Inf32), dims = 1))
end
if bound_coupling
ctmp .= C * pts
cdirs .= C * dirs
clblambdas .= (clbs .- ctmp .- epsilon) ./ cdirs
cublambdas .= (cubs .- ctmp .+ epsilon) ./ cdirs
clmins .= min.(clblambdas, cublambdas)
clmaxs .= max.(clblambdas, cublambdas)
lmin .=
max.(lmin, maximum(ifelse.(isfinite.(clmins), clmins, -Inf32), dims = 1))
lmax .= min.(lmax, minimum(ifelse.(isfinite.(clmaxs), clmaxs, Inf32), dims = 1))
end
# generate random lambdas and compute new points
@cuda threads = 256 blocks = 32 TeaRNG.device_fill_rand!(
lws,
seed + UInt32(iter * 3 + 2),
)
newpts .= pts + dirs .* (lmin .+ lws .* (lmax .- lmin))
oks .= all((newpts .>= lbs) .& (newpts .<= ubs), dims = 1)
if check_stoichiometry
# check if the new points' balance is within the equality bounds
oks .&= (sum((S * newpts .- b) .^ 2, dims = 1) .< epsilon)
end
pts .= ifelse.(oks, newpts, pts)
end
collect(pts)
end
end # module FullAffineHR
| CuFluxSampler | https://github.com/LCSB-BioCore/CuFluxSampler.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | 025ad27d907e845c3563fd02211d14f4ed2e32f3 | code | 1724 | """
Fast stateless random number generator for GPUs based on the TEA cipher.
"""
module TeaRNG
using ..CUDA
using ..DocStringExtensions
"""
$(TYPEDSIGNATURES)
Use the TEA cipher algorithm to reproducibly generate the `seq`-th random
number from the `stream`-th random stream.
"""
function tea_random(stream::UInt32, seq::UInt32)
v1 = stream
v2 = seq
s = 0x9E3779B9
for i = 1:8
v1 = UInt32(
v1 + xor(
UInt32(UInt32(v2 << 4) + 0xA341316C),
v2 + s,
UInt32((v2 >> 5) + 0xC8013EA4),
),
)
v2 = UInt32(
v2 + xor(
UInt32(UInt32(v1 << 4) + 0xAD90777D),
v1 + s,
UInt32((v1 >> 5) + 0x7E95761E),
),
)
s = UInt32(s + 0x9E3779B9)
end
return v1
end
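# The generator is stateless: a given (stream, seq) pair always maps to the
# same UInt32, e.g. tea_random(UInt32(1), UInt32(7)) is reproducible across
# calls and devices. The kernels below divide by Float32(0x100000000) to map
# the result into [0, 1).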
"""
$(TYPEDSIGNATURES)
CUDA.jl grid-stride kernel that fills the array with random numbers generated
by [`tea_random`](@ref). `seed` is used as the `stream` ID, global thread index
in grid is used as the `seq`uence number.
"""
function device_fill_rand!(arr, seed::UInt32)
index = threadIdx().x + blockDim().x * (blockIdx().x - 1)
stride = gridDim().x * blockDim().x
for i = index:stride:length(arr)
arr[i] = Float32(tea_random(seed, UInt32(i))) / Float32(0x100000000)
end
return
end
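"""
$(TYPEDSIGNATURES)
CUDA.jl grid-stride kernel that adds uniform random noise from the interval
`[offset, offset + scale)` to each element of `arr`, with random values
generated by [`tea_random`](@ref) using `seed` as the stream ID.
"""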
function device_add_unif_rand!(arr, seed::UInt32, offset::Float32, scale::Float32)
index = threadIdx().x + blockDim().x * (blockIdx().x - 1)
stride = gridDim().x * blockDim().x
for i = index:stride:length(arr)
arr[i] +=
offset + scale * (Float32(tea_random(seed, UInt32(i))) / Float32(0x100000000))
end
return
end
end # module TeaRNG
| CuFluxSampler | https://github.com/LCSB-BioCore/CuFluxSampler.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | 025ad27d907e845c3563fd02211d14f4ed2e32f3 | code | 554 |
@testset "Full-affine-combination hit-and-run" begin
m = load_model(df("e_coli_core.xml"))
warmup = warmup_from_variability(m, GLPK.Optimizer)
sample = CuFluxSampler.AffineHR.sample(
m,
warmup,
iters = 100,
bound_stoichiometry = true,
check_stoichiometry = true,
direction_noise_max = 1.0f-5,
epsilon = 1.0f-5,
)
lbs, ubs = bounds(m)
@test all(sample .>= lbs .- 1.0f-5)
@test all(sample .<= ubs .+ 1.0f-5)
@test all((stoichiometry(m) * sample) .^ 2 .< 1e-5)
end
| CuFluxSampler | https://github.com/LCSB-BioCore/CuFluxSampler.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | 025ad27d907e845c3563fd02211d14f4ed2e32f3 | code | 946 |
using Downloads
using SHA
function check_data_file_hash(path, expected_checksum)
actual_checksum = bytes2hex(sha256(open(path)))
if actual_checksum != expected_checksum
@error "The downloaded data file `$path' seems to be different from the expected one. Tests will likely fail." actual_checksum expected_checksum
end
end
function download_data_file(url, path, hash)
if isfile(path)
check_data_file_hash(path, hash)
@info "using cached `$path'"
return path
end
Downloads.download(url, path)
check_data_file_hash(path, hash)
return path
end
isdir("downloaded") || mkdir("downloaded")
df(s) = joinpath("downloaded", s)
model_paths = Dict{String,String}(
"e_coli_core.xml" => download_data_file(
"http://bigg.ucsd.edu/static/models/e_coli_core.xml",
df("e_coli_core.xml"),
"b4db506aeed0e434c1f5f1fdd35feda0dfe5d82badcfda0e9d1342335ab31116",
),
)
| CuFluxSampler | https://github.com/LCSB-BioCore/CuFluxSampler.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | 025ad27d907e845c3563fd02211d14f4ed2e32f3 | code | 176 |
using Test, CuFluxSampler
using COBREXA
using GLPK
include("data_downloaded.jl")
@testset "CuFluxSampler tests" begin
include("FullAffHR.jl")
include("xHR.jl")
end
| CuFluxSampler | https://github.com/LCSB-BioCore/CuFluxSampler.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | 025ad27d907e845c3563fd02211d14f4ed2e32f3 | code | 1528 |
@testset "General hit-and-run schemes" begin
m = load_model(df("e_coli_core.xml"))
start = warmup_from_variability(m, GLPK.Optimizer)
start = permutedims(repeat(start', 6)[1:1024, :], (2, 1))
lbs, ubs = bounds(m)
epsilon = 1.0f-5
@testset "module $(mod)" for mod in [CuFluxSampler.AffineHR, CuFluxSampler.ACHR]
@testset "stoichiometry bounding $bound_stoichiometry" for bound_stoichiometry in
[false, true]
@testset "stoichiometry checks $check_stoichiometry" for check_stoichiometry in
[false, true]
@testset "direction noise $direction_noise_max" for direction_noise_max in
[nothing, 1.0f-5]
sample = mod.sample(
m,
start;
iters = 10,
bound_stoichiometry,
check_stoichiometry,
direction_noise_max,
epsilon,
)
@test all(sample .>= lbs .- epsilon)
@test all(sample .<= ubs .+ epsilon)
if bound_stoichiometry || check_stoichiometry
@test all((stoichiometry(m) * sample) .^ 2 .< 1e-5)
end
end
end
end
end
end
| CuFluxSampler | https://github.com/LCSB-BioCore/CuFluxSampler.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | 025ad27d907e845c3563fd02211d14f4ed2e32f3 | docs | 2132 |
# CuFluxSampler.jl
| Documentation |
|:---:|
| [](https://lcsb-biocore.github.io/CuFluxSampler.jl/stable) [](https://lcsb-biocore.github.io/CuFluxSampler.jl/dev) |
Flux samplers for
[COBREXA.jl](https://github.com/LCSB-BioCore/COBREXA.jl/),
accelerated on GPUs via [CUDA.jl](https://github.com/JuliaGPU/CUDA.jl).
The repository contains the following modules with samplers:
- Affine-combination-directed Hit&Run (module `CuFluxSampler.AffineHR`)
- Artificially-Centered Hit&Run (module `CuFluxSampler.ACHR`)
Both modules export a function for running the sampler atop the COBREXA.jl
`MetabolicModel` structure, typically called `sample`. See the code comments
and documentation for details.
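A minimal usage sketch (assumes a COBREXA.jl model `m` and a warmup matrix
`start` with one point per column, e.g. from flux variability analysis):
```julia
import CuFluxSampler

# m = ...      # a COBREXA.jl MetabolicModel
# start = ...  # warmup points, one per column
pts = CuFluxSampler.ACHR.sample(m, start; iters = 100)
```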
The samplers support many options that can be turned on and off; in general:
- The number of points used for mixing the new run directions in `AffineHR`
may be changed with the `mix_points` parameter, and you can alternatively
supply your own mixing matrix in `mix_mtx`.
- You can turn the stoichiometry checks on/off with `check_stoichiometry` and
tune them with `epsilon` (in both `ACHR` and `AffineHR`).
- You can add tolerance bounds on stoichiometry to expand the feasible region
a little and allow randomized runs to succeed; see the `bound_stoichiometry`
and `direction_noise_max` parameters.
- You can seed the GPU-generated random numbers with `seed`.
Running the package code and tests requires a CUDA-capable GPU.
#### Acknowledgements
`CuFluxSampler.jl` was developed at the Luxembourg Centre for Systems
Biomedicine of the University of Luxembourg
([uni.lu/lcsb](https://www.uni.lu/lcsb)).
The development was supported by European Union's Horizon 2020 Programme under
PerMedCoE project ([permedcoe.eu](https://www.permedcoe.eu/)),
agreement no. 951773.
<img src="docs/src/assets/unilu.svg" alt="Uni.lu logo" height="64px"> <img src="docs/src/assets/lcsb.svg" alt="LCSB logo" height="64px"> <img src="docs/src/assets/permedcoe.svg" alt="PerMedCoE logo" height="64px">
| CuFluxSampler | https://github.com/LCSB-BioCore/CuFluxSampler.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | 025ad27d907e845c3563fd02211d14f4ed2e32f3 | docs | 93 |
# CuFluxSampler.jl
```@autodocs
Modules = [CuFluxSampler]
Pages = ["CuFluxSampler.jl"]
```
| CuFluxSampler | https://github.com/LCSB-BioCore/CuFluxSampler.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | 025ad27d907e845c3563fd02211d14f4ed2e32f3 | docs | 565 |
# Reference
## Artificially centered hit-and-run sampling
```@autodocs
Modules = [CuFluxSampler, CuFluxSampler.ACHR]
Pages = ["ACHR.jl"]
```
## Affine hit-and-run sampling
```@autodocs
Modules = [CuFluxSampler, CuFluxSampler.AffineHR]
Pages = ["AffineHR.jl"]
```
## Full-linear-combination affine hit-and-run sampling (internal)
```@autodocs
Modules = [CuFluxSampler, CuFluxSampler.FullAffineHR]
Pages = ["FullAffineHR.jl"]
```
## Internal functions
### TEA-based RNG
```@autodocs
Modules = [CuFluxSampler, CuFluxSampler.TeaRNG]
Pages = ["TeaRNG.jl"]
```
| CuFluxSampler | https://github.com/LCSB-BioCore/CuFluxSampler.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 455 | using ExoplanetsSysSim
start_dir = pwd()
data_dir_home = joinpath(dirname(pathof(ExoplanetsSysSim)),"..")
pkg_dev_home = joinpath(dirname(pathof(ExoplanetsSysSim)),"..")
cd(pkg_dev_home)
#= Disabled to prevent merge conflicts
println("# Pulling most recent version, just to be sure.")
flush(stdout)
run(`git pull`)
=#
# Disabled, so can create modules manually in another directory and make data a symlink
# include("init_modules.jl")
cd(start_dir)
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 1021 | using ExoplanetsSysSim
start_dir = pwd()
data_dir = joinpath(dirname(pathof(ExoplanetsSysSim)),"..","data")
pkg_dev_home = joinpath(dirname(pathof(ExoplanetsSysSim)),"..")
cd(pkg_dev_home)
if true
if !isfile(".gitmodules")
println("# Adding [email protected]:ExoJulia/SysSimData.git a submodule in the data directory...")
flush(stdout)
run(`git submodule add [email protected]:ExoJulia/SysSimData.git data`)
end
println("# Initializing the data submodule...")
flush(stdout)
run(`git submodule init`)
run(`git submodule update`)
cd("data")
println("# Initializing data's submodules...")
flush(stdout)
run(`git submodule init`)
println("# git lfs pull just in case binary files not downloaded already...")
flush(stdout)
run(`git lfs pull`)
cd("..")
end
println("# Recursively updating submodules...")
flush(stdout)
run(`git submodule update --recursive`)
if data_dir != joinpath(pkg_dev_home,"data")
cd(pkg_dev_home)
symlink(data_dir,"data")
end
cd(start_dir)
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 4324 | ## ExoplanetsSysSim/examples/dr25_gaia_fgk/abc_run.jl
## (c) 2019 Danley C. Hsu & Eric B. Ford
# Script for producing DR25 FGK planet candidate occurrence rate estimates
include("abc_setup.jl")
#using SysSimABC
using ExoplanetsSysSim
using JLD
using StatsBase
out2txt = false # Write occurrence rates & densities to text files
expandpart = true # Expand final generation for robust posteriors
prior_choice = "uniform"
bin_size_factor = 2.0
println("Setting up simulation...")
@time abc_plan = SysSimABC.setup_abc(prior_choice = prior_choice, bin_size_factor = bin_size_factor)
println("")
println("Running simulation...")
@time output = SysSimABC.run_abc(abc_plan)
println("")
println("Running simulation (part 2)...")
@time abc_plan = SysSimABC.setup_abc_p2(abc_plan)
@time output = SysSimABC.run_abc(abc_plan, output)
#@time abc_plan = change_distance()
#@time output = run_abc(abc_plan, output)
println("")
save(string("test-pop-out.jld"), "output", output, "ss_true", EvalSysSimModel.get_ss_obs())
if expandpart
println("Expanding to large generation...")
@time theta_largegen, weights_largegen = SysSimABC.run_abc_largegen(abc_plan, output, EvalSysSimModel.get_ss_obs(), output.accept_log.epsilon[end-1], npart=200)
println("")
save(string("test-pop-out.jld"), "output", output, "ss_true", EvalSysSimModel.get_ss_obs(), "theta_largegen", theta_largegen, "weights_largegen", weights_largegen)
end
if out2txt
file_rate = open("rate_output.txt", "w")
file_dens = open("dens_output.txt", "w")
end
limitP = get_any(EvalSysSimModel.sim_param_closure, "p_lim_arr", Array{Float64,1})
limitR = get_any(EvalSysSimModel.sim_param_closure, "r_lim_arr", Array{Float64,1})
const r_dim = length(limitR)-1
if expandpart
weight_vec = aweights(weights_largegen)
#weight_vec = aweights(fill(1.0, length(weights_largegen)))
else
weight_vec = aweights(output.weights)
#weight_vec = aweights(fill(1.0, length(output.weights)))
end
for p_ind = 1:(length(limitP)-1)
col_ind = (p_ind-1)*(r_dim)+1
for r_ind = 1:r_dim
bin_ind = (p_ind-1)*(r_dim)+r_ind
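# a bin "density" is the rate divided by the bin's log-area, Δln(P)·Δln(R)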
dens_denom = 1.0/log(limitP[p_ind+1]/limitP[p_ind])/log(limitR[r_ind+1]/limitR[r_ind])
if prior_choice == "dirichlet" && r_dim > 1
col_ind = (p_ind-1)*(r_dim+1)+1
bin_ind = (p_ind-1)*(r_dim+1)+r_ind+1
if expandpart
quant_arr = quantile(theta_largegen[bin_ind,:].*theta_largegen[col_ind,:], weight_vec, [0.1587, 0.5, 0.8413])
else
quant_arr = quantile(output.theta[bin_ind,:].*output.theta[col_ind,:], weight_vec, [0.1587, 0.5, 0.8413])
end
elseif prior_choice == "beta"
col_lambda = bin_size_factor * 3 * log(limitP[p_ind+1]/limitP[p_ind])/log(2)
if expandpart
quant_arr = quantile(theta_largegen[bin_ind,:]*col_lambda, weight_vec, [0.1587, 0.5, 0.8413])
else
quant_arr = quantile(output.theta[bin_ind,:]*col_lambda, weight_vec, [0.1587, 0.5, 0.8413])
end
else
if expandpart
quant_arr = quantile(theta_largegen[bin_ind,:], weight_vec, [0.1587, 0.5, 0.8413])
else
quant_arr = quantile(output.theta[bin_ind,:], weight_vec, [0.1587, 0.5, 0.8413])
end
end
println("-----------------------------")
println("Orbital Period (day) = ", string(limitP[p_ind:p_ind+1]), " / Planet Radius (R_earth) = ", string(limitR[r_ind:r_ind+1]/ExoplanetsSysSim.earth_radius))
println("")
println("Rate = ", string(quant_arr[2], " + ", quant_arr[3]-quant_arr[2], " - ", quant_arr[2]-quant_arr[1]))
println("Density = ", string(quant_arr[2]*dens_denom, " + ", (quant_arr[3]-quant_arr[2])*dens_denom, " - ", (quant_arr[2]-quant_arr[1])*dens_denom))
if out2txt
write(file_rate, string(quant_arr[2], " + ", quant_arr[3]-quant_arr[2], " - ", quant_arr[2]-quant_arr[1], "\n"))
write(file_dens, string(quant_arr[2]*dens_denom, " + ", (quant_arr[3]-quant_arr[2])*dens_denom, " - ", (quant_arr[2]-quant_arr[1])*dens_denom, "\n"))
end
end
end
if out2txt
close(file_rate)
close(file_dens)
end
println("-----------------------------")
println("")
println(EvalSysSimModel.get_ss_obs())
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 12544 | ## ExoplanetsSysSim/examples/dr25_gaia_fgk/abc_setup.jl
## (c) 2019 Eric B. Ford & Danley C. Hsu
# Collection of functions which specify ABC simulation parameters
module EvalSysSimModel
export setup, get_param_vector, get_ss_obs
export gen_data, calc_summary_stats, calc_distance, is_valid_uniform, is_valid_beta, is_valid_dirichlet, normalize_dirch
using ExoplanetsSysSim
using ApproximateBayesianComputing
const ABC = ApproximateBayesianComputing
include(joinpath(abspath(joinpath(dirname(Base.find_package("ExoplanetsSysSim")),"..")),"examples","dr25_gaia_fgk", "dr25_binrates_func.jl"))
sim_param_closure = SimParam()
summary_stat_ref_closure = CatalogSummaryStatistics()
function is_valid_uniform(param_vector::Vector{Float64})
global sim_param_closure
update_sim_param_from_vector!(param_vector,sim_param_closure)
local rate_tab::Array{Float64,2} = get_any(sim_param_closure, "obs_par", Array{Float64,2})
limitP::Array{Float64,1} = get_any(sim_param_closure, "p_lim_arr", Array{Float64,1})
#const lambda = sum_kbn(rate_tab)
if any(x -> x < 0., rate_tab) || any([floor(3*log(limitP[i+1]/limitP[i])/log(2)) for i in 1:length(limitP)-1] .< sum(rate_tab, dims=1)')
return false
end
return true
end
function is_valid_beta(param_vector::Vector{Float64})
global sim_param_closure
update_sim_param_from_vector!(param_vector,sim_param_closure)
local rate_tab::Array{Float64,2} = get_any(sim_param_closure, "obs_par", Array{Float64,2})
limitP::Array{Float64,1} = get_any(sim_param_closure, "p_lim_arr", Array{Float64,1})
local bin_size_factor::Float64 = get_real(sim_param_closure, "bin_size_factor")
#const lambda = sum_kbn(rate_tab)
if any(x -> x < 0., rate_tab) || any([floor(3*log(limitP[i+1]/limitP[i])/log(2)) for i in 1:length(limitP)-1] .< (bin_size_factor*3*[log(limitP[i+1]/limitP[i])/log(2) for i in 1:length(limitP)-1].*sum(rate_tab, dims=1)'))
return false
end
return true
end
function is_valid_dirichlet(param_vector::Vector{Float64})
global sim_param_closure
update_sim_param_from_vector!(param_vector,sim_param_closure)
local rate_tab::Array{Float64,2} = get_any(sim_param_closure, "obs_par", Array{Float64,2})
limitP::Array{Float64,1} = get_any(EvalSysSimModel.sim_param_closure, "p_lim_arr", Array{Float64,1})
#const lambda = sum_kbn(rate_tab)
if any(x -> x < 0., rate_tab) || any([floor(3*log(limitP[i+1]/limitP[i])/log(2)) for i in 1:length(limitP)-1] .< rate_tab[1,:])
return false
end
return true
end
function normalize_dirch(param_vector::Vector{Float64})
global sim_param_closure
local p_dim = length(get_any(sim_param_closure, "p_lim_arr", Array{Float64,1}))-1
local r_dim = length(get_any(sim_param_closure, "r_lim_arr", Array{Float64,1}))-1
for i in 1:p_dim
param_vector[((i-1)*(r_dim+1)+2):((i-1)*(r_dim+1)+(r_dim+1))] ./= sum(param_vector[((i-1)*(r_dim+1)+2):((i-1)*(r_dim+1)+(r_dim+1))])
end
update_sim_param_from_vector!(param_vector,sim_param_closure)
return param_vector
end
function gen_data(param_vector::Vector{Float64})
global sim_param_closure
update_sim_param_from_vector!(param_vector,sim_param_closure)
cat_phys = generate_kepler_physical_catalog(sim_param_closure)
#cat_phys_cut = ExoplanetsSysSim.generate_obs_targets(cat_phys, sim_param_closure)
#cat_obs = ExoplanetsSysSim.observe_kepler_targets_single_obs(cat_phys_cut, sim_param_closure)
cat_obs = ExoplanetsSysSim.observe_kepler_targets_sky_avg(cat_phys, sim_param_closure)
return cat_obs
end
# TODO OPT: Eventually, could adapt ABC.jl to use distance from first pass to decide if should compute additional summary statistics
function calc_summary_stats(cat::KeplerObsCatalog)
global sim_param_closure
sum_stat = calc_summary_stats_obs_binned_rates(cat, sim_param_closure, obs_skyavg = true)
return sum_stat
end
function calc_distance(sum_stat_obs::CatalogSummaryStatistics,sum_stat_sim::CatalogSummaryStatistics, n::Integer = 0)
global sim_param_closure
dist1 = calc_distance_vector_binned(sum_stat_obs,sum_stat_sim, 1, sim_param_closure)
num_available = length(dist1)
num_to_use = n>0 ? min(n,num_available) : num_available
return calc_scalar_distance(dist1[1:num_to_use])
end
function setup(prior_choice::String, bin_size_factor::Float64)
global sim_param_closure = setup_sim_param_dr25binrates()
add_param_fixed(sim_param_closure,"bin_size_factor",bin_size_factor)
if prior_choice == "dirichlet"
sim_param_closure = set_test_param_total(sim_param_closure)
add_param_fixed(sim_param_closure,"generate_num_planets",generate_num_planets_binrates_dirichlet)
if (length(get_any(sim_param_closure, "r_lim_arr", Array{Float64,1}))-1) > 1
add_param_fixed(sim_param_closure,"generate_period_and_sizes", generate_period_and_sizes_binrates_dirichlet)
end
elseif prior_choice == "beta"
sim_param_closure = set_test_param(sim_param_closure)
add_param_fixed(sim_param_closure,"generate_num_planets",generate_num_planets_binrates_beta)
add_param_fixed(sim_param_closure,"generate_period_and_sizes", generate_period_and_sizes_binrates_beta)
elseif prior_choice == "uniform"
sim_param_closure = set_test_param(sim_param_closure)
else
println("# Invalid prior given!")
exit()
end
### Use simulated planet candidate catalog data
# df_star = setup_star_table_christiansen(sim_param_closure)
# println("# Finished reading in stellar data")
# add_param_fixed(sim_param_closure,"num_kepler_targets",1000000) # For "observed" catalog
# cat_obs = simulated_read_kepler_observations(sim_param_closure)
# println("# Finished setting up simulated true catalog")
###
### Use real planet candidate catalog data
df_koi,usable_koi = read_koi_catalog(sim_param_closure)
println("# Finished reading in KOI data")
df_star = setup_star_table_dr25(sim_param_closure)
println("# Finished reading in stellar data")
cat_obs = setup_actual_planet_candidate_catalog(df_star, df_koi, usable_koi, sim_param_closure)
println("# Finished setting up true catalog")
###
global summary_stat_ref_closure = calc_summary_stats_obs_binned_rates(cat_obs,sim_param_closure, trueobs_cat = true)
end
get_param_vector() = make_vector_of_sim_param(sim_param_closure)
get_ss_obs() = summary_stat_ref_closure
function set_simparam_ss(sim_param::ExoplanetsSysSim.SimParam, ss_true::ExoplanetsSysSim.CatalogSummaryStatistics)
global sim_param_closure = sim_param
global summary_stat_ref_closure = ss_true
end
end # module EvalSysSimModel
#include(joinpath(Pkg.dir("ABC"),"src/composite.jl"))
module SysSimABC
export setup_abc, run_abc, run_abc_largegen, setup_abc_p2
using Distributions, Random, Distributed
using ApproximateBayesianComputing
const ABC = ApproximateBayesianComputing
import ApproximateBayesianComputing.CompositeDistributions.CompositeDist
import ApproximateBayesianComputing.TransformedBetaDistributions.LinearTransformedBeta
#using Compat
import ExoplanetsSysSim
import ..EvalSysSimModel
include(joinpath(abspath(joinpath(dirname(Base.find_package("ExoplanetsSysSim")),"..")),"examples","dr25_gaia_fgk", "dr25_binrates_func.jl"))
include(joinpath(abspath(joinpath(dirname(Base.find_package("ExoplanetsSysSim")),"..")),"examples","dr25_gaia_fgk", "beta_proposal.jl"))
function setup_abc(num_dist::Integer = 0; prior_choice::String = "uniform", bin_size_factor::Float64 = 1.5)
EvalSysSimModel.setup(prior_choice, bin_size_factor)
limitP::Array{Float64,1} = get_any(EvalSysSimModel.sim_param_closure, "p_lim_arr", Array{Float64,1})
limitR::Array{Float64,1} = get_any(EvalSysSimModel.sim_param_closure, "r_lim_arr", Array{Float64,1})
limitR_full::Array{Float64,1} = get_any(EvalSysSimModel.sim_param_closure, "r_lim_full", Array{Float64,1})
local r_dim = length(limitR)-1
prior_arr = ContinuousDistribution[]
ss_obs_table = EvalSysSimModel.get_ss_obs().stat["planets table"]
if prior_choice == "dirichlet"
weights_arr = [log(limitR[j+1]/limitR[j]) for j in 1:r_dim]/minimum([log(limitR_full[k+1]/limitR_full[k]) for k in 1:(length(limitR_full)-1)])
for i in 1:(length(limitP)-1)
max_in_col = 3*log(limitP[i+1]/limitP[i])/log(2)
lambda_col = Uniform(0.0, max_in_col)
prior_arr = vcat(prior_arr, lambda_col)
if r_dim > 1
dirch_dist = Dirichlet(weights_arr)
prior_arr = vcat(prior_arr, dirch_dist)
end
end
elseif prior_choice == "beta"
for i in 1:(length(limitP)-1)
for j in 1:r_dim
r_ind = findfirst(x -> x == limitR[j], limitR_full)
beta_dist = Beta(log(limitR[j+1]/limitR[j]), sum(log.([getindex(limitR_full, x) for x = 2:length(limitR_full) if x != r_ind+1] ./ [getindex(limitR_full, x) for x = 1:length(limitR_full)-1 if x != r_ind])))
prior_arr = vcat(prior_arr, beta_dist)
end
end
else
for i in 1:(length(limitP)-1)
max_in_col = bin_size_factor*log(limitP[i+1]/limitP[i])/log(2)
for j in 1:r_dim
uniform_dist = Uniform(0.0, max_in_col*log(limitR[j+1]/limitR[j])/log(2))
prior_arr = vcat(prior_arr, uniform_dist)
end
end
end
param_prior = CompositeDist(prior_arr)
in_parallel = nworkers() > 1 ? true : false
calc_distance_ltd(sum_stat_obs::ExoplanetsSysSim.CatalogSummaryStatistics,sum_stat_sim::ExoplanetsSysSim.CatalogSummaryStatistics) = EvalSysSimModel.calc_distance(sum_stat_obs,sum_stat_sim,num_dist)
global abc_plan = ABC.abc_pmc_plan_type(EvalSysSimModel.gen_data,EvalSysSimModel.calc_summary_stats, calc_distance_ltd, param_prior, make_proposal_dist=make_proposal_dist_multidim_beta, is_valid=EvalSysSimModel.is_valid_uniform, num_part=100, num_max_attempt=200, num_max_times=15, epsilon_init=9.9e99, target_epsilon=1.0e-100, in_parallel=in_parallel, adaptive_quantiles = false, epsilon_reduction_factor=0.9, tau_factor=1.5);
if prior_choice == "dirichlet" && r_dim > 1
abc_plan.make_proposal_dist = make_proposal_dist_multidim_beta_dirichlet
abc_plan.is_valid = EvalSysSimModel.is_valid_dirichlet
abc_plan.normalize = EvalSysSimModel.normalize_dirch
elseif prior_choice == "beta"
abc_plan.is_valid = EvalSysSimModel.is_valid_beta
end
return abc_plan
end
function setup_abc_p2(abc_plan::ABC.abc_pmc_plan_type)
abc_plan.tau_factor = 2.0
abc_plan.num_max_times = 200
ExoplanetsSysSim.add_param_fixed(EvalSysSimModel.sim_param_closure,"num_targets_sim_pass_one",10000)
return abc_plan
end
function run_abc_largegen(abc_plan::ABC.abc_pmc_plan_type, pop::ABC.abc_population_type, ss_true::ExoplanetsSysSim.CatalogSummaryStatistics, epshist_targ::Float64; npart::Integer = 1000, num_dist::Integer = 0)
abc_plan.num_max_times = 1
println("# run_abc_largegen: ",EvalSysSimModel.sim_param_closure)
sampler_largegen = abc_plan.make_proposal_dist(pop, abc_plan.tau_factor)
theta_largegen = Array{Float64}(undef, size(pop.theta, 1), npart)
weight_largegen = Array{Float64}(undef, npart)
for i in 1:npart
theta_val, dist_largegen, attempts_largegen = ABC.generate_theta(abc_plan, sampler_largegen, ss_true, epshist_targ)
theta_largegen[:,i] = theta_val
prior_logpdf = Distributions.logpdf(abc_plan.prior,theta_val)
sampler_logpdf = Distributions.logpdf(sampler_largegen, theta_val)
weight_largegen[i] = exp(prior_logpdf-sampler_logpdf)
end
return theta_largegen, weight_largegen
end
function run_abc(abc_plan::ABC.abc_pmc_plan_type)
#global sim_param_closure
println("# run_abc: ",EvalSysSimModel.sim_param_closure)
ss_true = EvalSysSimModel.get_ss_obs()
#println("True catalog SS: ", ss_true)
pop_out = ABC.run_abc(abc_plan,ss_true;verbose=true)
end
function run_abc(abc_plan::ABC.abc_pmc_plan_type, pop::ABC.abc_population_type)
#global sim_param_closure
dist_threshold = maximum(pop.dist)
EvalSysSimModel.add_param_fixed(EvalSysSimModel.sim_param_closure,"minimum ABC dist skip pass 2",dist_threshold)
println("# run_abc: ",EvalSysSimModel.sim_param_closure)
ss_true = EvalSysSimModel.get_ss_obs()
pop_out = ABC.run_abc(abc_plan,ss_true,pop;verbose=true)
end
end # module SysSimABC
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 9597 | using ExoplanetsSysSim
using ApproximateBayesianComputing
const ABC = ApproximateBayesianComputing
using SpecialFunctions
using Statistics
using StatsBase # provides AnalyticWeights for the weighted moments below
import ApproximateBayesianComputing.CompositeDistributions.CompositeDist
import ApproximateBayesianComputing.TransformedBetaDistributions.LinearTransformedBeta
#import EvalSysSimModel
# https://en.wikipedia.org/wiki/Trigamma_function
function trigamma_x_gr_4(x::T) where T<: Real
1/x + 0.5/x^2 + 1/(6*x^3) - 1/(30*x^5) + 1/(42*x^7) - 1/(30*x^9) + 5/(66*x^11) - 691/(2730*x^13) + 7/(6*x^15)
end
function trigamma_x_lt_4(x::T) where T<: Real
n = floor(Int64,5-x)
z = x+n
val = trigamma_x_gr_4(z)
for i in 1:n
z -= 1
val += 1/z^2
end
val
end
function trigamma(x::T) where T<: Real
x >= 4 ? trigamma_x_gr_4(x) : trigamma_x_lt_4(x)
end
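# Sanity check against a known special value: trigamma(1.0) ≈ π^2/6 ≈ 1.6449340668482264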
function var_weighted(x::AbstractArray{Float64,1}, w::AbstractArray{Float64,1} )
#println("# size(x) = ",size(x), " size(w) = ", size(w)); flush(stdout)
@assert(length(x)==length(w) )
sumw = sum(w)
@assert( sumw > 0. )
if(sumw!= 1.0)
w /= sum(w)
sumw = 1.0
end
sumw2 = sum(w.*w)
xbar = sum(x.*w)
covar = sum((x.-xbar).*(x.-xbar) .* w) * sumw/(sumw*sumw-sumw2)
end
function mom_alpha(x_bar::T, v_bar::T) where T<: Real
x_bar * (((x_bar * (1 - x_bar)) / v_bar) - 1)
end
function mom_beta(x_bar::T, v_bar::T) where T<: Real
(1 - x_bar) * (((x_bar * (1 - x_bar)) / v_bar) - 1)
end
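# Worked example of the moment inversion: for sample mean 0.2 and variance 0.01,
# mom_alpha(0.2, 0.01) == 3.0 and mom_beta(0.2, 0.01) == 12.0, and Beta(3, 12)
# indeed has mean 3/15 = 0.2 and variance 36/(15^2 * 16) = 0.01.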
# For algorithm, see https://scholarsarchive.byu.edu/cgi/viewcontent.cgi?article=2613&context=etd
function fit_beta_mle(x::AbstractArray{T,1}; tol::T = 1e-6, max_it::Int64 = 10, init_guess::AbstractArray{T,1} = Array{T}(undef,0), w::AbstractArray{T,1} = Array{T}(undef,0), verbose::Bool = false ) where T<: Real
lnxbar = length(w)>1 ? Statistics.mean(log.(x),AnalyticWeights(w)) : Statistics.mean(log.(x))
ln1mxbar = length(w)>1 ? Statistics.mean(log.(1.0.-x),AnalyticWeights(w)) : Statistics.mean(log.(1.0.-x))
function iterate_step( mle_guess::Vector{T} ) where T<:Real
(alpha, beta) = (mle_guess[1], mle_guess[2])
dgab = digamma(alpha+beta)
g1 = dgab - digamma(alpha) + lnxbar
g2 = dgab - digamma(beta) + ln1mxbar
tgab = trigamma(alpha+beta)
G = [dgab-trigamma(alpha) tgab; tgab tgab-trigamma(beta)]
mle_guess -= G \ [g1, g2]
end
local mle_new
# compute the weighted moments unconditionally so the warning below can report them
xbar = length(w)>1 ? Statistics.mean(x,AnalyticWeights(w)) : Statistics.mean(x)
vbar = length(w)>1 ? Statistics.varm(x,AnalyticWeights(w),xbar) : Statistics.varm(x,xbar)
if length(init_guess) != 2
mle_new = (vbar < xbar*(1.0-xbar)) ? [mom_alpha(xbar, vbar), mom_beta(xbar,vbar)] : ones(T,2)
else
mle_new = init_guess
end
if verbose
println("it = 0: ", mle_new)
end
if any(mle_new.<=zero(T))
println("# Warning: mean= ", xbar, " var= ",vbar," (alpha,beta)_init= ",mle_new," invalid, reinitializing to (1,1)")
verbose = true
mle_new = ones(T,2)
end
for i in 1:max_it
mle_old = mle_new
mle_new = iterate_step( mle_old )
epsilon = maximum(abs.(mle_old.-mle_new))
if verbose
println("# it = ", i, ": ", mle_new, " max(Delta alpha, Delta beta)= ", epsilon)
end
if epsilon < tol
break
end
end
return mle_new
end
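# Illustrative usage (a sketch; with enough draws the MLE approximately
# recovers the true parameters):
#
#   x = rand(Beta(2.0, 5.0), 10_000)
#   fit_beta_mle(x)   # ≈ [2.0, 5.0]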
function make_beta(x::AbstractArray{T,1}, w::AbstractArray{T,1};
xmean::T = Statistics.mean(x,AnalyticWeights(w)),
xvar::T = Statistics.varm(x,AnalyticWeights(w),xmean), tau_factor::T=one(T) ) where T<:Real
alpha_beta = (xvar < xmean*(1.0-xmean)) ? [mom_alpha(xmean, xvar), mom_beta(xmean,xvar)] : ones(T,2)
if any(alpha_beta.<=zero(T))
alpha_beta = fit_beta_mle(x, w=w, init_guess=alpha_beta, verbose=true)
end
if any(alpha_beta.<=zero(T))
alpha_beta = ones(T,2)
else
if minimum(alpha_beta)>1.5*tau_factor && sum(alpha_beta)>=20.0*tau_factor
alpha_beta ./= tau_factor
end
end
#println("Radius relative: a= ",alpha_beta[1], " b= ",alpha_beta[2])
Beta(alpha_beta[1], alpha_beta[2])
end
function make_beta_transformed(x::AbstractArray{T,1}, w::AbstractArray{T,1}; xmin::T=zero(T), xmax::T=one(T), xmean::T = Statistics.mean(x,AnalyticWeights(w)), xvar::T = Statistics.varm(x,AnalyticWeights(w),xmean), tau_factor::T=one(T) ) where T<:Real
alpha_beta = (xvar < xmean*(1.0-xmean)) ? [mom_alpha(xmean, xvar), mom_beta(xmean,xvar)] : ones(T,2)
if any(alpha_beta.<=zero(T))
alpha_beta = fit_beta_mle(x, w=w, init_guess=alpha_beta, verbose=true)
end
if any(alpha_beta.<=zero(T))
alpha_beta = ones(T,2)
else
if minimum(alpha_beta)>1.5*tau_factor && sum(alpha_beta)>=20.0*tau_factor
alpha_beta ./= tau_factor
end
end
#println("Total: a= ",alpha_beta[1], " b= ",alpha_beta[2])
LinearTransformedBeta(alpha_beta[1], alpha_beta[2], xmin=xmin, xmax=xmax)
end
function make_proposal_dist_multidim_beta(theta::AbstractArray{Float64,2}, weights::AbstractArray{Float64,1}, tau_factor::Float64; verbose::Bool = false)
global sim_param_closure
local limitP::Array{Float64,1} = get_any(EvalSysSimModel.sim_param_closure, "p_lim_arr", Array{Float64,1})
local p_dim = length(limitP)-1
local r_dim = length(get_any(EvalSysSimModel.sim_param_closure, "r_lim_arr", Array{Float64,1}))-1
theta_mean = sum(theta.*weights',dims=2) # weighted mean for parameters
theta_var = ABC.var_weighted(theta'.-theta_mean',weights) # scaled, weighted covar for parameters
tau_factor_indiv = fill(tau_factor,length(theta_var))
dist_arr = ContinuousDistribution[]
for j in 1:p_dim
max_col_rate = 3*log(limitP[j+1]/limitP[j])/log(2)
col_startidx = (j-1)*r_dim+1
#tau_factor_indiv[col_startidx] = 2.0
#=
println("mean= ",theta_mean)
println("var= ",theta_var)
println("tau= ",tau_factor_indiv)
for i in 1:length(theta_mean)
println("a= ",alpha(theta_mean[i],tau_factor*theta_var[i]), " b= ",beta(theta_mean[i],tau_factor*theta_var[i]))
end
=#
dist_arr = vcat(dist_arr, ContinuousDistribution[make_beta_transformed(theta[i,:], weights, xmin=0.0, xmax=max_col_rate, xmean=theta_mean[i]/max_col_rate, xvar=theta_var[i]/max_col_rate^2, tau_factor=tau_factor_indiv[i]) for i in (col_startidx):(col_startidx+r_dim-1)])
end
dist = CompositeDist(dist_arr)
end
function make_proposal_dist_multidim_beta(pop::abc_population_type, tau_factor::Float64; verbose::Bool = false)
make_proposal_dist_multidim_beta(pop.theta, pop.weights, tau_factor, verbose=verbose)
end
function make_proposal_dist_multidim_beta_dirichlet(theta::AbstractArray{Float64,2}, weights::AbstractArray{Float64,1}, tau_factor::Float64; verbose::Bool = false)
global sim_param_closure
local limitP::Array{Float64,1} = get_any(EvalSysSimModel.sim_param_closure, "p_lim_arr", Array{Float64,1})
local p_dim = length(limitP)-1
local r_dim = length(get_any(EvalSysSimModel.sim_param_closure, "r_lim_arr", Array{Float64,1}))-1
theta_mean = sum(theta.*weights',dims=2) # weighted mean for parameters
theta_var = ABC.var_weighted(theta'.-theta_mean',weights) # scaled, weighted covar for parameters
tau_factor_indiv = fill(tau_factor,length(theta_var))
dist_arr = ContinuousDistribution[]
for j in 1:p_dim
max_col_rate = 3*log(limitP[j+1]/limitP[j])/log(2)
col_startidx = (j-1)*(r_dim+1)+1
#tau_factor_indiv[col_startidx] = 2.0
# if verbose
# println("total: ",theta_mean[col_startidx]," ",theta_var[col_startidx])
# end
# for i in (col_startidx+1):(col_startidx+r_dim)
# mean_ratio = sum(theta[col_startidx,:].*theta[i,:].*weights) /(theta_mean[col_startidx]*theta_mean[i]) # weighted mean for parameters
# var_ratio = var_weighted(vec(theta[col_startidx,:].*theta[i,:]).-(theta_mean[col_startidx]*theta_mean[i]),weights)/(2 * theta_mean[col_startidx] * theta_var[i]) # scaled, weighted covar for parameters
# if verbose
# println("i=",i,": ",theta_mean[i]," ",theta_var[i]," ratios: ",mean_ratio, " ",var_ratio)
# end
# var_ratio = var_ratio >= one(var_ratio) ? var_ratio : one(var_ratio)
# tau_factor_indiv[i] = tau_factor*var_ratio
# end
# if verbose
# flush(stdout)
# end
#=
println("mean= ",theta_mean)
println("var= ",theta_var)
println("tau= ",tau_factor_indiv)
for i in 1:length(theta_mean)
println("a= ",alpha(theta_mean[i],tau_factor*theta_var[i]), " b= ",beta(theta_mean[i],tau_factor*theta_var[i]))
end
=#
dist_arr = vcat(dist_arr, make_beta_transformed(theta[col_startidx,:], weights, xmin=0.0, xmax=max_col_rate, xmean=theta_mean[col_startidx]/max_col_rate, xvar=theta_var[col_startidx]/max_col_rate^2, tau_factor=tau_factor_indiv[col_startidx]), ContinuousDistribution[ make_beta(theta[i,:], weights, xmean=theta_mean[i], xvar=theta_var[i], tau_factor=tau_factor_indiv[i]) for i in (col_startidx+1):(col_startidx+r_dim)])
end
dist = CompositeDist(dist_arr)
end
function make_proposal_dist_multidim_beta_dirichlet(pop::abc_population_type, tau_factor::Float64; verbose::Bool = false)
make_proposal_dist_multidim_beta_dirichlet(pop.theta, pop.weights, tau_factor, verbose=verbose)
end
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 50144 | ## ExoplanetsSysSim/examples/dr25_gaia_fgk/dr25_binrates_func.jl
## (c) 2019 Danley C. Hsu & Eric B. Ford
# Collection of functions specific to estimating DR25
# planet candidate occurrence rates over a 2D period-radius grid
using ExoplanetsSysSim
using StatsFuns
using JLD
using CSV
using DataFrames
using Distributions
## simulation_parameters
macro isdefinedlocal(var)
quote
try
$(esc(var))
true
catch err
isa(err, UndefVarError) ? false : rethrow(err)
end
end
end
function setup_sim_param_dr25binrates(args::Vector{String} = String[] ) # allow this to take a list of parameters (e.g., from the command line)
sim_param = ExoplanetsSysSim.SimParam()
add_param_fixed(sim_param,"max_tranets_in_sys",7)
add_param_fixed(sim_param,"generate_star",ExoplanetsSysSim.generate_star_dumb)
add_param_fixed(sim_param,"generate_planetary_system", ExoplanetsSysSim.generate_planetary_system_uncorrelated_incl)
add_param_fixed(sim_param,"generate_kepler_target",ExoplanetsSysSim.generate_kepler_target_from_table)
add_param_fixed(sim_param,"star_table_setup",setup_star_table_dr25)
add_param_fixed(sim_param,"stellar_catalog","q1q17_dr25_gaia_fgk.jld")
add_param_fixed(sim_param,"osd_file","dr25fgk_osds.jld")
add_param_fixed(sim_param,"generate_num_planets",generate_num_planets_binrates_uniform)
add_param_fixed(sim_param,"generate_planet_mass_from_radius",ExoplanetsSysSim.generate_planet_mass_from_radius_powerlaw)
add_param_fixed(sim_param,"vetting_efficiency",ExoplanetsSysSim.vetting_efficiency_none)
add_param_fixed(sim_param,"mr_power_index",2.0)
add_param_fixed(sim_param,"mr_const",1.0)
add_param_fixed(sim_param,"generate_period_and_sizes", generate_period_and_sizes_binrates_uniform)
add_param_fixed(sim_param,"p_lim_full",[0.5, 1., 2., 4., 8., 16., 32., 64., 128., 256., 500.])
add_param_fixed(sim_param,"r_lim_full",[0.25, 0.5, 0.75, 1., 1.25, 1.5, 1.75, 2., 2.5, 3., 4., 6., 8., 12., 16.]*ExoplanetsSysSim.earth_radius)
#p_dim = length(p_lim_arr_num)-1
#r_dim = length(r_lim_arr_num)-1
#rate_tab_init = reshape(fill(1.0, p_dim*r_dim)*0.01,(r_dim,p_dim))
#add_param_fixed(sim_param, "p_lim_arr", p_lim_arr_num)
#add_param_fixed(sim_param, "r_lim_arr", r_lim_arr_num*ExoplanetsSysSim.earth_radius)
#add_param_active(sim_param,"obs_par", rate_tab_init)
add_param_fixed(sim_param,"generate_e_omega",ExoplanetsSysSim.generate_e_omega_rayleigh)
add_param_fixed(sim_param,"sigma_hk",0.03)
add_param_fixed(sim_param,"sigma_incl",2.0) # degrees
add_param_fixed(sim_param,"calc_target_obs_sky_ave",ExoplanetsSysSim.calc_target_obs_sky_ave)
add_param_fixed(sim_param,"calc_target_obs_single_obs",ExoplanetsSysSim.calc_target_obs_single_obs)
add_param_fixed(sim_param,"transit_noise_model",ExoplanetsSysSim.transit_noise_model_diagonal)
return sim_param
end
function set_test_param(sim_param_closure::SimParam)
@eval(include(joinpath(pwd(),"param.in")))
if @isdefinedlocal(stellar_catalog)
@assert (typeof(stellar_catalog) == String)
add_param_fixed(sim_param_closure,"stellar_catalog",stellar_catalog)
end
if @isdefinedlocal(koi_catalog)
@assert (typeof(koi_catalog) == String)
add_param_fixed(sim_param_closure,"koi_catalog",koi_catalog)
end
if @isdefinedlocal(num_targ_sim)
@assert (typeof(num_targ_sim) == Int)
add_param_fixed(sim_param_closure,"num_targets_sim_pass_one",num_targ_sim)
end
if @isdefinedlocal(osd_file)
@assert (typeof(osd_file) == String)
add_param_fixed(sim_param_closure,"osd_file",osd_file)
end
@assert (typeof(p_bin_lim) == Array{Float64,1})
add_param_fixed(sim_param_closure, "p_lim_arr", p_bin_lim)
@assert (typeof(r_bin_lim) == Array{Float64,1})
add_param_fixed(sim_param_closure, "r_lim_arr", r_bin_lim*ExoplanetsSysSim.earth_radius)
p_dim = length(get_any(sim_param_closure, "p_lim_arr", Array{Float64,1}))-1
r_dim = length(get_any(sim_param_closure, "r_lim_arr", Array{Float64,1}))-1
n_bin = p_dim*r_dim
if @isdefinedlocal(rate_init)
if typeof(rate_init) <: Real
@assert (rate_init >= 0.0)
rate_init_list = fill(rate_init, n_bin)
else
rate_init_list = rate_init
end
@assert (ndims(rate_init_list) <= 2)
if ndims(rate_init_list) == 1
@assert (length(rate_init_list) == n_bin)
rate_tab_init = reshape(rate_init_list*0.01, (r_dim, p_dim))
else
@assert (size(rate_init_list) == (r_dim, p_dim))
rate_tab_init = rate_init_list*0.01
end
add_param_active(sim_param_closure, "obs_par", rate_tab_init)
else
rate_init_list = fill(1.0, n_bin)
rate_tab_init = reshape(rate_init_list*0.01, (r_dim, p_dim))
add_param_active(sim_param_closure, "obs_par", rate_tab_init)
end
return sim_param_closure
end
function set_test_param_total(sim_param_closure::SimParam)
@eval(include(joinpath(pwd(),"param.in")))
if @isdefinedlocal(stellar_catalog)
@assert (typeof(stellar_catalog) == String)
add_param_fixed(sim_param_closure,"stellar_catalog",stellar_catalog)
end
if @isdefinedlocal(koi_catalog)
@assert (typeof(koi_catalog) == String)
add_param_fixed(sim_param_closure,"koi_catalog",koi_catalog)
end
if @isdefinedlocal(num_targ_sim)
@assert (typeof(num_targ_sim) == Int)
add_param_fixed(sim_param_closure,"num_targets_sim_pass_one",num_targ_sim)
end
if @isdefinedlocal(osd_file)
@assert (typeof(osd_file) == String)
add_param_fixed(sim_param_closure,"osd_file",osd_file)
end
@assert (typeof(p_bin_lim) == Array{Float64,1})
add_param_fixed(sim_param_closure, "p_lim_arr", p_bin_lim)
@assert (typeof(r_bin_lim) == Array{Float64,1})
add_param_fixed(sim_param_closure, "r_lim_arr", r_bin_lim*ExoplanetsSysSim.earth_radius)
p_dim = length(get_any(sim_param_closure, "p_lim_arr", Array{Float64,1}))-1
r_dim = length(get_any(sim_param_closure, "r_lim_arr", Array{Float64,1}))-1
n_bin = p_dim*r_dim
if @isdefinedlocal(rate_init)
if typeof(rate_init) <: Real
@assert (rate_init >= 0.0)
rate_init_list = fill(rate_init, n_bin)
else
rate_init_list = rate_init
end
@assert (ndims(rate_init_list) <= 2)
if ndims(rate_init_list) == 1
@assert (length(rate_init_list) == n_bin)
rate_tab_init = reshape(rate_init_list*0.01, (r_dim, p_dim))
else
@assert (size(rate_init_list) == (r_dim, p_dim))
rate_tab_init = rate_init_list*0.01
end
if r_dim > 1
lamb_col = sum(rate_tab_init, dims=1)
rate_tab_init = rate_tab_init ./ lamb_col
rate_tab_init = vcat(lamb_col, rate_tab_init)
end
add_param_active(sim_param_closure, "obs_par", rate_tab_init)
else
rate_init_list = fill(1.0, n_bin)
rate_tab_init = reshape(rate_init_list*0.01, (r_dim, p_dim))
if r_dim > 1
lamb_col = sum(rate_tab_init, dims=1)
rate_tab_init = rate_tab_init ./ lamb_col
rate_tab_init = vcat(lamb_col, rate_tab_init)
end
add_param_active(sim_param_closure, "obs_par", rate_tab_init)
end
if r_dim == 1
add_param_fixed(sim_param_closure,"generate_period_and_sizes", generate_period_and_sizes_binrates_single_rp)
end
return sim_param_closure
end
## planetary_system
function draw_uniform_selfavoiding(n::Integer; lower_bound::Real=0.0, upper_bound=1.0, min_separation::Real = 0.05, return_sorted::Bool=false )
@assert(n>=1)
@assert(upper_bound>lower_bound)
@assert(2*min_separation*n<upper_bound-lower_bound)
list = rand(n)
sorted_idx = collect(1:n)
segment_length = upper_bound-lower_bound
list[1] = lower_bound+segment_length*list[1] # First draw is standard uniform
segment_length -= min(upper_bound,list[1]+min_separation)-max(lower_bound,list[1]-min_separation)
for i in 2:n
segment_length -= min(upper_bound,list[i-1]+min_separation)-max(lower_bound,list[i-1]-min_separation) # Reduce length for future draws
list[i] *= segment_length # Draw over reduced range based on which segments need to be excluded
list[i] += lower_bound
j = 1
while j<= i-1 # Checking for conflicts
k = sorted_idx[j] # Going from low to high
if list[i]>list[k]-min_separation # If too close, then bump the draw up past this point's exclusion zone
list[i] += min(upper_bound,list[k]+min_separation)-max(lower_bound,list[k]-min_separation)
else
break
end
j += 1
end
for k in i:-1:(j+1) # Keep larger values sorted
sorted_idx[k]=sorted_idx[k-1]
end
sorted_idx[j] = i # Save order for this draw
#segment_length -= min(upper_bound,list[i]+min_separation)-max(lower_bound,list[i]-min_separation) # Reduce length for future draws
end
return return_sorted ? list[sorted_idx] : list
end
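# Hedged usage sketch (the example_ name is an illustrative addition, not part
# of the original pipeline): five draws on [0,1] that keep a minimum pairwise
# separation of 0.05; with return_sorted=true consecutive gaps respect it.
function example_selfavoiding_draws()
    vals = draw_uniform_selfavoiding(5, lower_bound=0.0, upper_bound=1.0,
                                     min_separation=0.05, return_sorted=true)
    @assert all(diff(vals) .>= 0.05 - 1e-12) # allow for floating-point rounding
    return vals
end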
function generate_num_planets_binrates_uniform(s::Star, sim_param::SimParam)
local max_tranets_in_sys::Int64 = get_int(sim_param,"max_tranets_in_sys") # TODO SCI: Is 7 planets max per system OK, even when fitting across potentially 9 period bins?
#local max_tranets_per_P::Int64 = 3 # Set maximum number of planets per period range as loose stability criteria and to prevent near-crossing orbits
rate_tab::Array{Float64,2} = get_any(sim_param, "obs_par", Array{Float64,2})
limitP::Array{Float64,1} = get_any(sim_param, "p_lim_arr", Array{Float64,1})
local p_dim = length(limitP)-1
local r_dim = length(get_any(sim_param, "r_lim_arr", Array{Float64,1}))-1
sum_lambda = 0
for i in 1:p_dim
sum_lambda += ExoplanetsSysSim.generate_num_planets_poisson(sum(rate_tab[:,i]), convert(Int64, floor(3*log(limitP[i+1]/limitP[i])/log(2))))
end
#println("# lambda= ", sum_lambda)
return min(sum_lambda, max_tranets_in_sys)
end
function generate_num_planets_binrates_beta(s::Star, sim_param::SimParam)
local max_tranets_in_sys::Int64 = get_int(sim_param,"max_tranets_in_sys") # TODO SCI: Is 7 planets max per system OK, even when fitting across potentially 9 period bins?
#local max_tranets_per_P::Int64 = 3 # Set maximum number of planets per period range as loose stability criteria and to prevent near-crossing orbits
rate_tab::Array{Float64,2} = get_any(sim_param, "obs_par", Array{Float64,2})
limitP::Array{Float64,1} = get_any(sim_param, "p_lim_arr", Array{Float64,1})
local p_dim = length(limitP)-1
local r_dim = length(get_any(sim_param, "r_lim_arr", Array{Float64,1}))-1
local bin_size_factor::Float64 = get_real(sim_param, "bin_size_factor")
sum_lambda = 0
for i in 1:p_dim
sum_lambda += ExoplanetsSysSim.generate_num_planets_poisson(bin_size_factor*3*log(limitP[i+1]/limitP[i])/log(2)*sum(rate_tab[:,i]), convert(Int64, floor(3*log(limitP[i+1]/limitP[i])/log(2))))
end
#println("# lambda= ", sum_lambda)
return min(sum_lambda, max_tranets_in_sys)
end
function generate_num_planets_binrates_dirichlet(s::Star, sim_param::SimParam)
local max_tranets_in_sys::Int64 = get_int(sim_param,"max_tranets_in_sys") # TODO SCI: Is 7 planets max per system OK, even when fitting across potentially 9 period bins?
#local max_tranets_per_P::Int64 = 3 # Set maximum number of planets per period range as loose stability criteria and to prevent near-crossing orbits
rate_tab::Array{Float64,2} = get_any(sim_param, "obs_par", Array{Float64,2})
limitP::Array{Float64,1} = get_any(sim_param, "p_lim_arr", Array{Float64,1})
local p_dim = length(limitP)-1
local r_dim = length(get_any(sim_param, "r_lim_arr", Array{Float64,1}))-1
sum_lambda = 0
for i in 1:p_dim
sum_lambda += ExoplanetsSysSim.generate_num_planets_poisson(rate_tab[1,i], convert(Int64, floor(3*log(limitP[i+1]/limitP[i])/log(2))))
end
#println("# lambda= ", sum_lambda)
return min(sum_lambda, max_tranets_in_sys)
end
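# Hedged helper (illustrative, not called by the pipeline): the cap passed to
# generate_num_planets_poisson by the three generators above allows at most
# ~3 planets per factor-of-two (octave) span in orbital period.
max_planets_per_period_bin(P_lo::Real, P_hi::Real) = convert(Int64, floor(3*log(P_hi/P_lo)/log(2)))
# e.g. max_planets_per_period_bin(10.0, 40.0) == 6 (two octaves)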
function generate_period_and_sizes_binrates_uniform(s::Star, sim_param::SimParam; num_pl::Integer = 1)
rate_tab::Array{Float64,2} = get_any(sim_param, "obs_par", Array{Float64,2})
limitP::Array{Float64,1} = get_any(sim_param, "p_lim_arr", Array{Float64,1})
limitRp::Array{Float64,1} = get_any(sim_param, "r_lim_arr", Array{Float64,1})
local r_dim = length(limitRp)-1
sepa_min = 0.05 # Minimum orbital separation in AU
backup_sepa_factor_slightly_less_than_one = 0.95
@assert ((length(limitP)-1) == size(rate_tab, 2))
@assert ((length(limitRp)-1) == size(rate_tab, 1))
Plist = zeros(num_pl)
Rplist = zeros(num_pl)
rate_tab_1d = reshape(rate_tab,length(rate_tab))
maxcuml = sum(rate_tab_1d)
cuml = cumsum(rate_tab_1d/maxcuml)
# We assume uniform sampling in log P and log Rp within each bin
j_idx = ones(Int64, num_pl)
for n in 1:num_pl
rollp = Base.rand()
idx = findfirst(x -> x > rollp, cuml)
i_idx = (idx-1)%size(rate_tab,1)+1
j_idx[n] = floor(Int64,(idx-1)//size(rate_tab,1))+1
Rplist[n] = exp(Base.rand()*(log(limitRp[i_idx+1])-log(limitRp[i_idx]))+log(limitRp[i_idx]))
end
for j in 1:(length(limitP)-1)
tmp_ind = findall(x -> x == j, j_idx)
if length(tmp_ind) > 0
redraw_att = 0
invalid_config = true
while invalid_config && redraw_att < 20
n_range = length(tmp_ind)
loga_min = log(ExoplanetsSysSim.semimajor_axis(limitP[j], s.mass))
loga_min_ext = log(ExoplanetsSysSim.semimajor_axis(limitP[j], s.mass)+sepa_min) # Used for determining minimum semimajor axis separation
loga_max = log(ExoplanetsSysSim.semimajor_axis(limitP[j+1], s.mass))
logsepa_min = min(loga_min_ext-loga_min, (loga_max-loga_min)/n_range/2*backup_sepa_factor_slightly_less_than_one) # Prevents minimum separations too large
tmp_logalist = draw_uniform_selfavoiding(n_range,min_separation=logsepa_min,lower_bound=loga_min,upper_bound=loga_max)
tmp_Plist = exp.((3*tmp_logalist .- log(s.mass))/2)*ExoplanetsSysSim.day_in_year # Convert from log a (in AU) back to P (in days)
invalid_config = false
redraw_att += 1
for n in 1:n_range
if tmp_Plist[n] < limitP[j] || tmp_Plist[n] > limitP[j+1]
invalid_config = true
else
Plist[tmp_ind[n]] = tmp_Plist[n]
end
end
end
end
end
return Plist, Rplist
end
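# Hedged sketch of the index decoding used above (decode_bin_index is an
# illustrative addition): rate_tab is (r_dim x p_dim) and is flattened
# column-major, so the radius bin varies fastest within each period column.
decode_bin_index(idx::Int, r_dim::Int) = ((idx-1)%r_dim + 1, div(idx-1, r_dim) + 1) # (radius bin, period bin)
# e.g. with r_dim = 3: decode_bin_index(5, 3) == (2, 2)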
function generate_period_and_sizes_binrates_beta(s::Star, sim_param::SimParam; num_pl::Integer = 1)
rate_tab::Array{Float64,2} = get_any(sim_param, "obs_par", Array{Float64,2})
limitP::Array{Float64,1} = get_any(sim_param, "p_lim_arr", Array{Float64,1})
limitRp::Array{Float64,1} = get_any(sim_param, "r_lim_arr", Array{Float64,1})
local bin_size_factor::Float64 = get_real(sim_param, "bin_size_factor")
local r_dim = length(limitRp)-1
sepa_min = 0.05 # Minimum orbital separation in AU
backup_sepa_factor_slightly_less_than_one = 0.95
@assert ((length(limitP)-1) == size(rate_tab, 2))
@assert ((length(limitRp)-1) == size(rate_tab, 1))
Plist = zeros(num_pl)
Rplist = zeros(num_pl)
rate_tab_1d = reshape([3*log(limitP[i+1]/limitP[i])/log(2) for i in 1:length(limitP)-1]'.*rate_tab,length(rate_tab))
maxcuml = sum(rate_tab_1d)
cuml = cumsum(rate_tab_1d/maxcuml)
# We assume uniform sampling in log P and log Rp within each bin
j_idx = ones(Int64, num_pl)
for n in 1:num_pl
rollp = Base.rand()
idx = findfirst(x -> x > rollp, cuml)
i_idx = (idx-1)%size(rate_tab,1)+1
j_idx[n] = floor(Int64,(idx-1)//size(rate_tab,1))+1
Rplist[n] = exp(Base.rand()*(log(limitRp[i_idx+1])-log(limitRp[i_idx]))+log(limitRp[i_idx]))
end
for j in 1:(length(limitP)-1)
tmp_ind = findall(x -> x == j, j_idx)
if length(tmp_ind) > 0
redraw_att = 0
invalid_config = true
while invalid_config && redraw_att < 20
n_range = length(tmp_ind)
loga_min = log(ExoplanetsSysSim.semimajor_axis(limitP[j], s.mass))
loga_min_ext = log(ExoplanetsSysSim.semimajor_axis(limitP[j], s.mass)+sepa_min) # Used for determining minimum semimajor axis separation
loga_max = log(ExoplanetsSysSim.semimajor_axis(limitP[j+1], s.mass))
logsepa_min = min(loga_min_ext-loga_min, (loga_max-loga_min)/n_range/2*backup_sepa_factor_slightly_less_than_one) # Prevents minimum separations too large
tmp_logalist = draw_uniform_selfavoiding(n_range,min_separation=logsepa_min,lower_bound=loga_min,upper_bound=loga_max)
tmp_Plist = exp.((3*tmp_logalist .- log(s.mass))/2)*ExoplanetsSysSim.day_in_year # Convert from log a (in AU) back to P (in days)
invalid_config = false
redraw_att += 1
for n in 1:n_range
if tmp_Plist[n] < limitP[j] || tmp_Plist[n] > limitP[j+1]
invalid_config = true
else
Plist[tmp_ind[n]] = tmp_Plist[n]
end
end
end
end
end
return Plist, Rplist
end
function generate_period_and_sizes_binrates_dirichlet(s::Star, sim_param::SimParam; num_pl::Integer = 1)
rate_tab::Array{Float64,2} = get_any(sim_param, "obs_par", Array{Float64,2})
limitP::Array{Float64,1} = get_any(sim_param, "p_lim_arr", Array{Float64,1})
limitRp::Array{Float64,1} = get_any(sim_param, "r_lim_arr", Array{Float64,1})
local r_dim = length(limitRp)-1
sepa_min = 0.05 # Minimum orbital separation in AU
backup_sepa_factor_slightly_less_than_one = 0.95
@assert ((length(limitP)-1) == size(rate_tab, 2))
@assert ((length(limitRp)-1) == (size(rate_tab, 1)-1))
Plist = zeros(num_pl)
Rplist = zeros(num_pl)
maxcuml = sum(rate_tab[1,:])
cuml = cumsum(rate_tab[1,:]/maxcuml)
# We assume uniform sampling in log P and log Rp within each bin
j_idx = ones(Int64, num_pl)
for n in 1:num_pl
rollp = Base.rand()
j_idx[n] = findfirst(x -> x > rollp, cuml)
end
for j in 1:(length(limitP)-1)
tmp_ind = findall(x -> x == j, j_idx)
if length(tmp_ind) > 0
redraw_att = 0
invalid_config = true
while invalid_config && redraw_att < 20
n_range = length(tmp_ind)
loga_min = log(ExoplanetsSysSim.semimajor_axis(limitP[j], s.mass))
loga_min_ext = log(ExoplanetsSysSim.semimajor_axis(limitP[j], s.mass)+sepa_min) # Used for determining minimum semimajor axis separation
loga_max = log(ExoplanetsSysSim.semimajor_axis(limitP[j+1], s.mass))
logsepa_min = min(loga_min_ext-loga_min, (loga_max-loga_min)/n_range/2*backup_sepa_factor_slightly_less_than_one) # Prevents minimum separations too large
tmp_logalist = draw_uniform_selfavoiding(n_range,min_separation=logsepa_min,lower_bound=loga_min,upper_bound=loga_max)
tmp_Plist = exp.((3*tmp_logalist .- log(s.mass))/2)*ExoplanetsSysSim.day_in_year # Convert from log a (in AU) back to P (in days)
rad_dist = Distributions.Categorical(rate_tab[((j-1)*(r_dim+1)+2):((j-1)*(r_dim+1)+(r_dim+1))]) # Distribution for fraction of times the next planet draw would be assigned to a given radius bin
invalid_config = false
redraw_att += 1
for n in 1:n_range
if tmp_Plist[n] < limitP[j] || tmp_Plist[n] > limitP[j+1]
invalid_config = true
else
Plist[tmp_ind[n]] = tmp_Plist[n]
end
i_idx = rand(rad_dist)
Rplist[tmp_ind[n]] = exp(Base.rand()*(log(limitRp[i_idx+1])-log(limitRp[i_idx]))+log(limitRp[i_idx]))
end
end
end
end
return Plist, Rplist
end
## stellar_table
function setup_dr25(sim_param::SimParam; force_reread::Bool = false)
#global df
wf = WindowFunction.setup_window_function(sim_param)
WindowFunction.setup_OSD_interp(sim_param) #read in osd files so they can be interpolated
df = ExoplanetsSysSim.StellarTable.df
if haskey(sim_param,"read_stellar_catalog") && !force_reread
return df
#return data
end
stellar_catalog_filename = convert(String,joinpath(abspath(joinpath(dirname(Base.find_package("ExoplanetsSysSim")),"..")), "data", convert(String,get(sim_param,"stellar_catalog","q1_q17_dr25_stellar.csv")) ) )
df = setup_dr25(stellar_catalog_filename)
add_param_fixed(sim_param,"read_stellar_catalog",true)
add_param_fixed(sim_param,"num_kepler_targets",StellarTable.num_usable_in_star_table())
if !haskey(sim_param.param,"num_targets_sim_pass_one")
add_param_fixed(sim_param,"num_targets_sim_pass_one", StellarTable.num_usable_in_star_table())
end
StellarTable.set_star_table(df)
return df
end
function setup_dr25(filename::String; force_reread::Bool = false)
#global df, usable
df = ExoplanetsSysSim.StellarTable.df
#usable = ExoplanetsSysSim.StellarTable.usable
if occursin(r".jld2$",filename)
try
data = load(filename)
df = data["stellar_catalog"]
#usable::Array{Int64,1} = data["stellar_catalog_usable"]
Core.typeassert(df,DataFrame)
StellarTable.set_star_table(df)
catch
error(string("# Failed to read stellar catalog >",filename,"< in jld2 format."))
end
else
try
df = CSV.read(filename)
catch
error(string("# Failed to read stellar catalog >",filename,"< in ascii format."))
end
has_mass = .! (ismissing.(df[:mass]) .| ismissing.(df[:mass_err1]) .| ismissing.(df[:mass_err2]))
has_radius = .! (ismissing.(df[:radius]) .| ismissing.(df[:radius_err1]) .| ismissing.(df[:radius_err2]))
has_dens = .! (ismissing.(df[:dens]) .| ismissing.(df[:dens_err1]) .| ismissing.(df[:dens_err2]))
has_cdpp = .! (ismissing.(df[:rrmscdpp01p5]) .| ismissing.(df[:rrmscdpp02p0]) .| ismissing.(df[:rrmscdpp02p5]) .| ismissing.(df[:rrmscdpp03p0]) .| ismissing.(df[:rrmscdpp03p5]) .| ismissing.(df[:rrmscdpp04p5]) .| ismissing.(df[:rrmscdpp05p0]) .| ismissing.(df[:rrmscdpp06p0]) .| ismissing.(df[:rrmscdpp07p5]) .| ismissing.(df[:rrmscdpp09p0]) .| ismissing.(df[:rrmscdpp10p5]) .| ismissing.(df[:rrmscdpp12p0]) .| ismissing.(df[:rrmscdpp12p5]) .| ismissing.(df[:rrmscdpp15p0]))
has_ld = .! (ismissing.(df[:limbdark_coeff1]) .| ismissing.(df[:limbdark_coeff2]) .| ismissing.(df[:limbdark_coeff3]) .| ismissing.(df[:limbdark_coeff4]))
has_rest = .! (ismissing.(df[:dataspan]) .| ismissing.(df[:dutycycle]))
in_Q1Q12 = []
obs_gt_5q = []
for x in df[:st_quarters]
subx = string(x)
num_q_obs = count(isequal('1'), subx) # number of quarters observed (replaces Julia 0.6's matchall)
push!(obs_gt_5q, num_q_obs>5)
subx = ("0"^(17-length(subx)))*subx
indQ = something(findfirst(isequal('1'), subx), 0) # first observed quarter, 0 if none (replaces Julia 0.6's search)
if ((indQ < 1) | (indQ > 12)) || num_q_obs<=5
push!(in_Q1Q12, false)
else
push!(in_Q1Q12, true)
end
end
is_FGK = []
for x in 1:length(df[:teff])
if ((df[x,:teff] > 4000.0) & (df[x,:teff] < 7000.0) & (df[x,:logg] > 4.0))
push!(is_FGK, true)
else
push!(is_FGK, false)
end
end
is_usable = has_radius .& is_FGK .& has_mass .& has_rest .& has_dens .& has_cdpp .& obs_gt_5q .& has_ld
if occursin("q1_q16_stellar.csv", filename)
is_usable = is_usable .& in_Q1Q12
end
# See options at: http://exoplanetarchive.ipac.caltech.edu/docs/API_keplerstellar_columns.html
symbols_to_keep = [ :kepid, :mass, :mass_err1, :mass_err2, :radius, :radius_err1, :radius_err2, :dens, :dens_err1, :dens_err2, :rrmscdpp01p5, :rrmscdpp02p0, :rrmscdpp02p5, :rrmscdpp03p0, :rrmscdpp03p5, :rrmscdpp04p5, :rrmscdpp05p0, :rrmscdpp06p0, :rrmscdpp07p5, :rrmscdpp09p0, :rrmscdpp10p5, :rrmscdpp12p0, :rrmscdpp12p5, :rrmscdpp15p0, :cdppslplong, :cdppslpshrt, :dataspan, :dutycycle, :limbdark_coeff1, :limbdark_coeff2, :limbdark_coeff3, :limbdark_coeff4 ]
delete!(df, [~(x in symbols_to_keep) for x in names(df)]) # delete columns that we won't be using anyway
usable = findall(is_usable)
df = df[usable, symbols_to_keep]
tmp_df = DataFrame()
for col in names(df)
tmp_df[col] = collect(skipmissing(df[col]))
end
df = tmp_df
mast_df = CSV.read(convert(String,joinpath(abspath(joinpath(dirname(Base.find_package("ExoplanetsSysSim")),"..")), "data", "KeplerMAST_TargetProperties.csv")))
delete!(mast_df, [~(x in [:kepid, :contam]) for x in names(mast_df)])
df = join(df, mast_df, on=:kepid)
StellarTable.set_star_table(df)
end
println("# Removing stars observed <5 quarters.")
df[!,:wf_id] = map(x->ExoplanetsSysSim.WindowFunction.get_window_function_id(x,use_default_for_unknown=false),df[!,:kepid])
obs_5q = df[!,:wf_id].!=-1
df = df[obs_5q, names(df)]
StellarTable.set_star_table(df)
return df
end
setup_star_table_dr25(sim_param::SimParam; force_reread::Bool = false) = setup_dr25(sim_param, force_reread=force_reread)
setup_star_table_dr25(filename::String; force_reread::Bool = false) = setup_dr25(filename, force_reread=force_reread)
## summary_statistics
function calc_summary_stats_obs_binned_rates(cat_obs::KeplerObsCatalog, param::SimParam; trueobs_cat::Bool = false, obs_skyavg::Bool = false)
ssd = Dict{String,Any}()
cache = Dict{String,Any}()
if !trueobs_cat
ssd["num targets"] = get_int(param,"num_targets_sim_pass_one")
else
ssd["num targets"] = get_int(param,"num_kepler_targets")
end
max_tranets_in_sys = get_int(param,"max_tranets_in_sys") # Demo that simulation parameters can specify how to evaluate models, too
@assert max_tranets_in_sys >= 1
idx_tranets = findall(x::KeplerTargetObs-> length(x.obs) > 0, cat_obs.target)::Array{Int64,1} # Find indices of systems with at least 1 tranet = potentially detectable transiting planet
# Count total number of tranets and compile indices for N-tranet systems
num_tranets = 0
idx_n_tranets = Vector{Int64}[ Int64[] for m = 1:max_tranets_in_sys]
for n in 1:max_tranets_in_sys-1
idx_n_tranets[n] = findall(x::KeplerTargetObs-> length(x.obs) == n, cat_obs.target[idx_tranets] )
num_tranets += n*length(idx_n_tranets[n])
end
idx_n_tranets[max_tranets_in_sys] = findall(x::KeplerTargetObs-> length(x.obs) >= max_tranets_in_sys, cat_obs.target[idx_tranets] )
num_tranets += max_tranets_in_sys*length(idx_n_tranets[max_tranets_in_sys]) # WARNING: this means we need to ignore planets w/ indices > max_tranets_in_sys
if ( length( findall(x::KeplerTargetObs-> length(x.obs) > max_tranets_in_sys, cat_obs.target[idx_tranets] ) ) > 0) # Make sure max_tranets_in_sys is at least big enough for observed systems
@warn("Observational data has more transiting planets in one system than max_tranets_in_sys allows.")
end
num_tranets = convert(Int64,num_tranets) # TODO OPT: Figure out why this isn't already an Int; I may be doing something that prevents some optimizations
num_sys_tranets = zeros(max_tranets_in_sys) # Since observed data, don't need to calculate probabilities.
for n in 1:max_tranets_in_sys # Make histogram of N-tranet systems
num_sys_tranets[n] = length(idx_n_tranets[n])
end
ssd["num_sys_tranets"] = num_sys_tranets
ssd["planets detected"] = num_tranets
period_list = zeros(num_tranets)
weight_list = zeros(num_tranets)
radius_list = zeros(num_tranets)
n = 1 # tranet id
if !trueobs_cat
for i in idx_tranets
ld = ExoplanetsSysSim.LimbDarkeningParam4thOrder(ExoplanetsSysSim.StellarTable.star_table(cat_obs.target[i].star.id,:limbdark_coeff1), ExoplanetsSysSim.StellarTable.star_table(cat_obs.target[i].star.id,:limbdark_coeff2), ExoplanetsSysSim.StellarTable.star_table(cat_obs.target[i].star.id,:limbdark_coeff3), ExoplanetsSysSim.StellarTable.star_table(cat_obs.target[i].star.id,:limbdark_coeff4) )
flux_ratio = (1.0+ExoplanetsSysSim.StellarTable.star_table(cat_obs.target[i].star.id, :contam))/1.0 # WARNING: Assumes flux = 1
#Rstar = trueobs_cat ? cat_obs.target[i].star.radius : ExoplanetsSysSim.StellarTable.star_table(cat_obs.target[i].star.id, :radius)
for j in 1:num_planets(cat_obs.target[i])
period_list[n] = cat_obs.target[i].obs[j].period
if obs_skyavg
weight_list[n] = min(ExoplanetsSysSim.prob_detect(cat_obs.target[i].prob_detect,j), 1.0) # CHECK WHAT THIS DOES
else
weight_list[n] = 1.0
end
radius_ratio = ExoplanetsSysSim.ratio_from_depth(cat_obs.target[i].obs[j].depth*flux_ratio, ld)
radius_list[n] = radius_ratio*ExoplanetsSysSim.StellarTable.star_table(cat_obs.target[i].star.id, :radius)
#radius_list[n] = sqrt(cat_obs.target[i].obs[j].depth)*cat_obs.target[i].star.radius
#radius_list[n] = sqrt(cat_obs.target[i].obs[j].depth)*Rstar
n = n+1
end
end
else
for i in idx_tranets
ld = ExoplanetsSysSim.LimbDarkeningParam4thOrder(ExoplanetsSysSim.StellarTable.star_table(cat_obs.target[i].star.id,:limbdark_coeff1), ExoplanetsSysSim.StellarTable.star_table(cat_obs.target[i].star.id,:limbdark_coeff2), ExoplanetsSysSim.StellarTable.star_table(cat_obs.target[i].star.id,:limbdark_coeff3), ExoplanetsSysSim.StellarTable.star_table(cat_obs.target[i].star.id,:limbdark_coeff4) )
flux_ratio = (1.0+ExoplanetsSysSim.StellarTable.star_table(cat_obs.target[i].star.id, :contam))/1.0 # WARNING: Assumes flux = 1
for j in 1:num_planets(cat_obs.target[i])
period_list[n] = cat_obs.target[i].obs[j].period
weight_list[n] = 1.0
radius_ratio = ExoplanetsSysSim.ratio_from_depth(cat_obs.target[i].obs[j].depth*flux_ratio, ld)
radius_list[n] = radius_ratio*cat_obs.target[i].star.radius
#radius_list[n] = sqrt(cat_obs.target[i].obs[j].depth)*cat_obs.target[i].star.radius
n = n+1
end
end
end
#ssd["period_list"] = period_list
ssd["weight_list"] = weight_list
#ssd["radius_list"] = radius_list
limitP::Array{Float64,1} = get_any(param, "p_lim_arr", Array{Float64,1})
limitRp::Array{Float64,1} = get_any(param, "r_lim_arr", Array{Float64,1})
np_bin = zeros((length(limitP)-1) * (length(limitRp)-1))
np_bin_idx = 1
bin_match_list = fill(fill(0,0),(length(limitP)-1)*(length(limitRp)-1))
for i in 1:(length(limitP)-1)
P_match = findall(x -> ((x > limitP[i]) && (x < limitP[i+1])), period_list)
for j in 1:(length(limitRp)-1)
R_match = findall(x -> ((x > limitRp[j]) && (x < limitRp[j+1])), radius_list)
bin_match = intersect(P_match, R_match)
bin_match_list[np_bin_idx] = bin_match
np_bin[np_bin_idx] = sum(weight_list[bin_match])
np_bin_idx += 1
end
end
cache["bin_match_list"] = bin_match_list
#ssd["planets detected"] = sum(np_bin)
ssd["planets table"] = np_bin
return CatalogSummaryStatistics(ssd, cache)
end
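# Hedged sketch (planets_table_index is an illustrative addition): the
# "planets table" built above is flattened with the radius bin varying fastest
# within each period bin, matching the (pbin-1)*n_R + rbin indexing used by
# cnt_np_bin and the rate printers below.
planets_table_index(p_bin::Int, r_bin::Int, n_R::Int) = (p_bin - 1)*n_R + r_bin
# e.g. planets_table_index(2, 1, 3) == 4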
## abc_distance
function calc_distance_vector_binned(summary1::CatalogSummaryStatistics, summary2::CatalogSummaryStatistics, pass::Int64, sim_param::SimParam ; verbose::Bool = false)
p_dim = length(get_any(sim_param, "p_lim_arr", Array{Float64,1}))-1
r_dim = length(get_any(sim_param, "r_lim_arr", Array{Float64,1}))-1
#rate_tab::Array{Float64,2} = get_any(sim_param, "obs_par", Array{Float64,2})
d = Array{Float64}(undef,0)
if pass == 1
if verbose
println("# Summary 1, pass 1: ",summary1)
println("# Summary 2, pass 1: ",summary2)
end
d = zeros(3)
np1 = haskey(summary1.stat,"planets table") ? summary1.stat["planets table"] : summary1.stat["expected planets table"]
np2 = haskey(summary2.stat,"planets table") ? summary2.stat["planets table"] : summary2.stat["expected planets table"]
np_bin = zeros(length(np1))
num_detect_sim = zeros(length(np1))
### Bernoulli distance
bin_match_list = summary2.cache["bin_match_list"]
@assert length(bin_match_list) == length(np1)
np2 = zeros(Int64,length(np1))
###
for n in 1:length(np1)
#np_bin[n] = dist_L1_abs(np1[n]/summary1.stat["num targets"], np2[n]/summary2.stat["num targets"])
#np_bin[n] = dist_L2_abs(np1[n]/summary1.stat["num targets"], np2[n]/summary2.stat["num targets"])
#np_bin[n] = distance_poisson_draw(np2[n]/summary2.stat["num targets"]*summary1.stat["num targets"], convert(Int64, np1[n]))
np_bin[n], num_detect_sim[n] = distance_sum_of_bernoulli_draws(floor(Int64,np1[n]),summary1.stat["num targets"], summary2.stat["weight_list"], summary2.stat["num targets"], bin_match_list[n])
#println("True # [Bin ", n,"] = ",np1[n],", Expected # [Bin ", n,"] = ",np2[n])
end
#d[1] = maximum(np_bin)
#d[1] = sum(np_bin)
np1_ratio = np1 ./ summary1.stat["num targets"]
np2_ratio = num_detect_sim ./ summary1.stat["num targets"]
d[1] = distance_canberra(np1_ratio, np2_ratio)# + distance_cosine(np1_ratio, np2_ratio)
#println("Total rate: ", rate_tab[1,1], " / Distance (radii): ", d[1], " / Sim. cat. ratio = ", sum(num_detect_sim[1:r_dim])/summary2.stat["num_targets"], " / Obs. cat. ratio = ", sum(np1[1:r_dim])/summary1.stat["num targets"], " / Distance (total): ", dist_L2_abs(sum(num_detect_sim[1:r_dim])/summary2.stat["num targets"], sum(np1[1:r_dim])/summary1.stat["num targets"])*r_dim)
# for j in 1:p_dim
# d[1] += dist_L2_abs(sum(num_detect_sim[(j-1)*r_dim+1:(j-1)*r_dim+r_dim])/summary1.stat["num targets"], sum(np1[(j-1)*r_dim+1:(j-1)*r_dim+r_dim])/summary1.stat["num targets"])*r_dim
# end
else
println("# calc_distance_vector_demo doesn't know what to do for pass= ", pass)
end
return d
end
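# Hedged sketch (illustrative, not ExoplanetsSysSim's implementation): a
# Canberra distance like the one used for d[1] above accumulates
# |a_i - b_i| / (|a_i| + |b_i|) over bins, treating empty (0,0) bins as zero.
function canberra_sketch(a::AbstractVector{<:Real}, b::AbstractVector{<:Real})
    d = 0.0
    for (ai, bi) in zip(a, b)
        denom = abs(ai) + abs(bi)
        d += denom > 0 ? abs(ai - bi)/denom : 0.0
    end
    return d
end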
## eval_model
# function test_dr25binrates()
# global sim_param_closure = setup_sim_param_dr25binrates()
# cat_phys = generate_kepler_physical_catalog(sim_param_closure)
# cat_obs = observe_kepler_targets_single_obs(cat_phys,sim_param_closure)
# global summary_stat_ref_closure = calc_summary_stats_obs_demo(cat_obs,sim_param_closure)
# global cat_phys_try_closure = generate_christiansen_catalog(sim_param_closure)
# global cat_obs_try_closure = observe_kepler_targets_sky_avg(cat_phys_try_closure,sim_param_closure)
# global summary_stat_try_closure = calc_summary_stats_sim_pass_one_demo(cat_obs_try_closure,cat_phys_try_closure,sim_param_closure)
# summary_stat_try_closure = calc_summary_stats_sim_pass_two_demo(cat_obs_try_closure,cat_phys_try_closure,summary_stat_try_closure,sim_param_closure)
# param_guess = make_vector_of_sim_param(sim_param_closure)
# evaluate_model_scalar_ret( param_guess)
# end
## inverse_detection & simple bayesian
function inv_det(cat_obs::KeplerObsCatalog, param::SimParam)
num_targ = ExoplanetsSysSim.StellarTable.num_usable_in_star_table()
limitP::Array{Float64,1} = get_any(param, "p_lim_arr", Array{Float64,1})
limitRp::Array{Float64,1} = get_any(param, "r_lim_arr", Array{Float64,1})
println("------------------------------")
cnt_bin, np_bin = cnt_np_bin(cat_obs, param)
println("------------------------------")
println("Inverse Detection Rates:")
for i in 1:(length(limitP)-1)
for j in 1:(length(limitRp)-1)
rate_f = np_bin[(i-1)*(length(limitRp)-1) + j]/num_targ*100.
if cnt_bin[(i-1)*(length(limitRp)-1) + j] > 0.
println(rate_f,
" +/- ", rate_f/sqrt(cnt_bin[(i-1)*(length(limitRp)-1) + j]), " %")
else
println(rate_f,
" +/- N/A %")
end
end
end
println()
end
function simp_bayes(cat_obs::KeplerObsCatalog, param::SimParam)
num_targ = ExoplanetsSysSim.StellarTable.num_usable_in_star_table()
limitP::Array{Float64,1} = get_any(param, "p_lim_arr", Array{Float64,1})
limitRp::Array{Float64,1} = get_any(param, "r_lim_arr", Array{Float64,1})
println("------------------------------")
cnt_bin, np_bin = cnt_np_bin(cat_obs, param)
println("------------------------------")
ess_bin = stellar_ess(param)
println("------------------------------")
println("Simple Bayesian Rates:")
for i in 1:(length(limitP)-1)
for j in 1:(length(limitRp)-1)
rate_f = (1.0+cnt_bin[(i-1)*(length(limitRp)-1) + j])/(1.0+ess_bin[(i-1)*(length(limitRp)-1) + j])*100.
up_quant = quantile(Gamma(1.0+cnt_bin[(i-1)*(length(limitRp)-1) + j], 1.0/(1.0+ess_bin[(i-1)*(length(limitRp)-1) + j])), 0.8413)*100.
low_quant = quantile(Gamma(1.0+cnt_bin[(i-1)*(length(limitRp)-1) + j], 1.0/(1.0+ess_bin[(i-1)*(length(limitRp)-1) + j])), 0.1587)*100.
println(rate_f,
" + ", up_quant - rate_f,
" - ", rate_f - low_quant, " %")
end
end
println()
end
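# Hedged illustration of the posterior used above (the example_ name is an
# addition): with n detected planets and stellar effective sample size ess,
# the rate posterior is Gamma(1+n, 1/(1+ess)); the printed 68% interval uses
# the 15.87% and 84.13% quantiles, and rate_f is the posterior mean (x100 for
# percent).
function example_simple_bayes_interval(n_det::Real, ess::Real)
    post = Gamma(1.0 + n_det, 1.0/(1.0 + ess))
    return quantile(post, 0.1587), (1.0 + n_det)/(1.0 + ess), quantile(post, 0.8413)
end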
function inv_det_simp_bayes(cat_obs::KeplerObsCatalog, param::SimParam)
num_targ = ExoplanetsSysSim.StellarTable.num_usable_in_star_table()
limitP::Array{Float64,1} = get_any(param, "p_lim_arr", Array{Float64,1})
limitRp::Array{Float64,1} = get_any(param, "r_lim_arr", Array{Float64,1})
println("------------------------------")
cnt_bin, np_bin = cnt_np_bin(cat_obs, param)
println("------------------------------")
ess_bin = stellar_ess(param)
println("------------------------------")
println("Inverse Detection Rates:")
for i in 1:(length(limitP)-1)
for j in 1:(length(limitRp)-1)
rate_f = np_bin[(i-1)*(length(limitRp)-1) + j]/num_targ*100.
if cnt_bin[(i-1)*(length(limitRp)-1) + j] > 0.
println(rate_f,
" +/- ", rate_f/sqrt(cnt_bin[(i-1)*(length(limitRp)-1) + j]), " %")
else
println(rate_f,
" +/- N/A %")
end
end
end
println()
println("Simple Bayesian Rates:")
for i in 1:(length(limitP)-1)
for j in 1:(length(limitRp)-1)
rate_f = (1.0+cnt_bin[(i-1)*(length(limitRp)-1) + j])/(1.0+ess_bin[(i-1)*(length(limitRp)-1) + j])*100.
up_quant = quantile(Gamma(1.0+cnt_bin[(i-1)*(length(limitRp)-1) + j], 1.0/(1.0+ess_bin[(i-1)*(length(limitRp)-1) + j])), 0.8413)*100.
low_quant = quantile(Gamma(1.0+cnt_bin[(i-1)*(length(limitRp)-1) + j], 1.0/(1.0+ess_bin[(i-1)*(length(limitRp)-1) + j])), 0.1587)*100.
println(rate_f,
" + ", up_quant - rate_f,
" - ", rate_f - low_quant, " %")
end
end
println()
end
## cnt_bin & np_bin (inverse detection & simple bayesian)
function cnt_np_bin(cat_obs::KeplerObsCatalog, param::SimParam, verbose::Bool = true)
num_targ = ExoplanetsSysSim.StellarTable.num_usable_in_star_table()
idx_tranets = findall(x::KeplerTargetObs-> length(x.obs) > 0, cat_obs.target)::Array{Int64,1}
limitP::Array{Float64,1} = get_any(param, "p_lim_arr", Array{Float64,1})
limitRp::Array{Float64,1} = get_any(param, "r_lim_arr", Array{Float64,1})
np_bin = zeros((length(limitP)-1) * (length(limitRp)-1))
cnt_bin = zeros((length(limitP)-1) * (length(limitRp)-1))
pl_idx = 1
println("Calculating completeness for each planet...")
for i in idx_tranets
for j in 1:num_planets(cat_obs.target[i])
pper = cat_obs.target[i].obs[j].period
prad = sqrt(cat_obs.target[i].obs[j].depth)*cat_obs.target[i].star.radius
pbin = findfirst(x -> ((pper > limitP[x]) && (pper < limitP[x+1])), collect(1:(length(limitP)-1)))
rbin = findfirst(x -> ((prad > limitRp[x]) && (prad < limitRp[x+1])), collect(1:(length(limitRp)-1)))
if (pbin !== nothing) && (rbin !== nothing) # findfirst returns nothing when no bin matches
cnt_bin[(pbin-1)*(length(limitRp)-1) + rbin] += 1
pgeo = ExoplanetsSysSim.calc_transit_prob_single_planet_approx(pper, cat_obs.target[i].star.radius, cat_obs.target[i].star.mass)
pdet = 0.0
for star_id in 1:num_targ
ld = ExoplanetsSysSim.LimbDarkeningParam4thOrder(ExoplanetsSysSim.StellarTable.star_table(star_id,:limbdark_coeff1), ExoplanetsSysSim.StellarTable.star_table(star_id,:limbdark_coeff2), ExoplanetsSysSim.StellarTable.star_table(star_id,:limbdark_coeff3), ExoplanetsSysSim.StellarTable.star_table(star_id,:limbdark_coeff4) )
star = SingleStar(ExoplanetsSysSim.StellarTable.star_table(star_id,:radius),ExoplanetsSysSim.StellarTable.star_table(star_id,:mass),1.0, ld, star_id)
cdpp_arr = ExoplanetsSysSim.make_cdpp_array_empty(star_id)#(1.0e-6*sqrt(1.0 / 24.0 / ExoplanetsSysSim.LC_duration)) .* [ExoplanetsSysSim.StellarTable.star_table(star_id, :rrmscdpp01p5)*sqrt(1.5), ExoplanetsSysSim.StellarTable.star_table(star_id, :rrmscdpp02p0)*sqrt(2.), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp02p5)*sqrt(2.5), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp03p0)*sqrt(3.), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp03p5)*sqrt(3.5), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp04p5)*sqrt(4.5), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp05p0)*sqrt(5.), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp06p0)*sqrt(6.), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp07p5)*sqrt(7.5), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp09p0)*sqrt(9.), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp10p5)*sqrt(10.5), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp12p0)*sqrt(12.), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp12p5)*sqrt(12.5), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp15p0)*sqrt(15.)]
#cdpp = 1.0e-6 * ExoplanetsSysSim.StellarTable.star_table(star_id, :rrmscdpp04p5) * sqrt(4.5/24.0 / ExoplanetsSysSim.LC_duration )
contam = 0.0
data_span = ExoplanetsSysSim.StellarTable.star_table(star_id, :dataspan)
duty_cycle = ExoplanetsSysSim.StellarTable.star_table(star_id, :dutycycle)
pl_arr = Array{Planet}(undef,1)
orbit_arr = Array{Orbit}(undef,1)
incl = acos(Base.rand()*star.radius*ExoplanetsSysSim.rsol_in_au/ExoplanetsSysSim.semimajor_axis(pper, star.mass))
orbit_arr[1] = Orbit(pper, 0., incl, 0., 0., Base.rand()*2.0*pi)
pl_arr[1] = Planet(prad, 1.0e-6)
if ExoplanetsSysSim.StellarTable.star_table_has_key(:wf_id)
wf_id = ExoplanetsSysSim.StellarTable.star_table(star_id,:wf_id)
else
wf_id = ExoplanetsSysSim.WindowFunction.get_window_function_id(ExoplanetsSysSim.StellarTable.star_table(star_id,:kepid))
end
kep_targ = KeplerTarget([PlanetarySystem(star, pl_arr, orbit_arr)], repeat(cdpp_arr, outer=[1,1]),contam,data_span,duty_cycle,wf_id)
duration = ExoplanetsSysSim.calc_transit_duration(kep_targ,1,1)
if duration <= 0.
continue
end
ntr = ExoplanetsSysSim.calc_expected_num_transits(kep_targ, 1, 1, param)
depth = ExoplanetsSysSim.calc_transit_depth(kep_targ,1,1)
cdpp = ExoplanetsSysSim.interpolate_cdpp_to_duration_lookup_cdpp(kep_targ, duration)
#cdpp = ExoplanetsSysSim.interpolate_cdpp_to_duration(kep_targ, duration)
snr = ExoplanetsSysSim.calc_snr_if_transit_cdpp(kep_targ, depth, duration, cdpp, param, num_transit=ntr)
pdet += ExoplanetsSysSim.calc_prob_detect_if_transit(kep_targ, snr, pper, duration, param, num_transit=ntr)
end
np_bin[(pbin-1)*(length(limitRp)-1) + rbin] += 1.0/pgeo/(pdet/num_targ)
if verbose
println("Planet ",pl_idx," => Bin ", (pbin-1)*(length(limitRp)-1) + rbin, ", C = ", 1.0/pgeo/(pdet/num_targ))
end
pl_idx += 1
end
end
end
return cnt_bin, np_bin
end
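# Hedged note (inv_det_weight is an illustrative addition): each detected
# planet contributes 1/(pgeo * mean_pdet) to np_bin above, where pgeo is the
# geometric transit probability and mean_pdet the detection probability
# averaged over the stellar sample; summing these and dividing by the number
# of targets yields the inverse-detection occurrence rate.
inv_det_weight(pgeo::Real, mean_pdet::Real) = 1.0/(pgeo*mean_pdet)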
## stellar catalog ess (simple bayesian)
function stellar_ess(param::SimParam, verbose::Bool = true)
num_realiz = 100
num_targ = ExoplanetsSysSim.StellarTable.num_usable_in_star_table()
limitP::Array{Float64,1} = get_any(param, "p_lim_arr", Array{Float64,1})
limitRp::Array{Float64,1} = get_any(param, "r_lim_arr", Array{Float64,1})
ess_bin = zeros((length(limitP)-1) * (length(limitRp)-1))
println(string("Stellar ESS calculation beginning..."))
for star_id in 1:num_targ
ld = ExoplanetsSysSim.LimbDarkeningParam4thOrder(ExoplanetsSysSim.StellarTable.star_table(star_id,:limbdark_coeff1), ExoplanetsSysSim.StellarTable.star_table(star_id,:limbdark_coeff2), ExoplanetsSysSim.StellarTable.star_table(star_id,:limbdark_coeff3), ExoplanetsSysSim.StellarTable.star_table(star_id,:limbdark_coeff4) )
star = SingleStar(ExoplanetsSysSim.StellarTable.star_table(star_id,:radius),ExoplanetsSysSim.StellarTable.star_table(star_id,:mass),1.0, ld, star_id)
cdpp_arr = ExoplanetsSysSim.make_cdpp_array_empty(star_id)#(1.0e-6*sqrt(1.0/24.0/ExoplanetsSysSim.LC_duration)) .* [ExoplanetsSysSim.StellarTable.star_table(star_id, :rrmscdpp01p5)*sqrt(1.5), ExoplanetsSysSim.StellarTable.star_table(star_id, :rrmscdpp02p0)*sqrt(2.), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp02p5)*sqrt(2.5), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp03p0)*sqrt(3.), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp03p5)*sqrt(3.5), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp04p5)*sqrt(4.5), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp05p0)*sqrt(5.), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp06p0)*sqrt(6.), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp07p5)*sqrt(7.5), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp09p0)*sqrt(9.), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp10p5)*sqrt(10.5), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp12p0)*sqrt(12.), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp12p5)*sqrt(12.5), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp15p0)*sqrt(15.)]
contam = 0.0
data_span = ExoplanetsSysSim.StellarTable.star_table(star_id, :dataspan)
duty_cycle = ExoplanetsSysSim.StellarTable.star_table(star_id, :dutycycle)
if ExoplanetsSysSim.StellarTable.star_table_has_key(:wf_id)
wf_id = ExoplanetsSysSim.StellarTable.star_table(star_id,:wf_id)
else
wf_id = ExoplanetsSysSim.WindowFunction.get_window_function_id(ExoplanetsSysSim.StellarTable.star_table(star_id,:kepid))
end
for i_idx in 1:(length(limitP)-1)
for j_idx in 1:(length(limitRp)-1)
temp_bin = 0.0
for n_test in 1:num_realiz
pper = exp(Base.rand()*(log(limitP[i_idx+1])-log(limitP[i_idx]))+log(limitP[i_idx]))
prad = exp(Base.rand()*(log(limitRp[j_idx+1])-log(limitRp[j_idx]))+log(limitRp[j_idx]))
pgeo = ExoplanetsSysSim.calc_transit_prob_single_planet_approx(pper, star.radius, star.mass)
pdet = 0.0
pl_arr = Array{Planet}(undef,1)
orbit_arr = Array{Orbit}(undef,1)
incl = acos(Base.rand()*star.radius*ExoplanetsSysSim.rsol_in_au/ExoplanetsSysSim.semimajor_axis(pper, star.mass))
orbit_arr[1] = Orbit(pper, 0., incl, 0., 0., Base.rand()*2.0*pi)
pl_arr[1] = Planet(prad, 1.0e-6)
kep_targ = KeplerTarget([PlanetarySystem(star, pl_arr, orbit_arr)], repeat(cdpp_arr, outer=[1,1]),contam,data_span,duty_cycle,wf_id)
duration = ExoplanetsSysSim.calc_transit_duration(kep_targ,1,1)
if duration <= 0.
continue
end
ntr = ExoplanetsSysSim.calc_expected_num_transits(kep_targ, 1, 1, param)
depth = ExoplanetsSysSim.calc_transit_depth(kep_targ,1,1)
# Apply correction to snr if grazing transit
size_ratio = kep_targ.sys[1].planet[1].radius/kep_targ.sys[1].star.radius
b = ExoplanetsSysSim.calc_impact_parameter(kep_targ.sys[1],1)
snr_correction = ExoplanetsSysSim.calc_depth_correction_for_grazing_transit(b,size_ratio)
depth *= snr_correction
#cdpp = ExoplanetsSysSim.interpolate_cdpp_to_duration(kep_targ, duration)
cdpp = ExoplanetsSysSim.interpolate_cdpp_to_duration_lookup_cdpp(kep_targ, duration)
snr = ExoplanetsSysSim.calc_snr_if_transit_cdpp(kep_targ, depth, duration, cdpp, param, num_transit=ntr)
#kepid = ExoplanetsSysSim.StellarTable.star_table(kep_targ.sys[1].star.id, :kepid)
#osd_duration = ExoplanetsSysSim.get_legal_durations(pper,duration) #tests if durations are included in Kepler's observations for a certain planet period. If not, returns nearest possible duration
#osd = ExoplanetsSysSim.WindowFunction.interp_OSD_from_table(kepid, pper, osd_duration)
#if osd_duration > duration #use a correcting factor if this duration is lower than the minimum searched for this period.
# osd = osd*osd_duration/duration
#end
#snr = ExoplanetsSysSim.calc_snr_if_transit(kep_targ, depth, duration, osd, sim_param, num_transit=ntr)
pdet = ExoplanetsSysSim.calc_prob_detect_if_transit(kep_targ, snr, pper, duration, param, num_transit=ntr)
temp_bin += (pgeo*pdet)
end
ess_bin[(i_idx-1)*(length(limitRp)-1) + j_idx] += temp_bin/num_realiz
end
end
if verbose && rem(star_id, 10^convert(Int,floor(log10(num_targ)))) == 0.
println(string("Star #", star_id, " finished"))
end
end
if verbose
println("")
for i in 1:(length(limitP)-1)
for j in 1:(length(limitRp)-1)
println("Period limits: ", limitP[i:i+1], " / Radius limits: ", limitRp[j:j+1]/ExoplanetsSysSim.earth_radius, " / Stellar ESS = ", ess_bin[(i-1)*(length(limitRp)-1) + j])
end
end
end
return ess_bin
end
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 917 | ## ExoplanetsSysSim/examples/hsu_etal_2018/invdet_calc.jl
## (c) 2018 Danley C. Hsu
# Script for producing DR25 FGK planet candidate occurrence rate estimates
# using both the inverse detection efficiency and the simple
# Bayesian methods
using ExoplanetsSysSim
include(joinpath(abspath(joinpath(dirname(Base.find_package("ExoplanetsSysSim")),"..")),"examples","dr25_gaia_fgk", "dr25_binrates_func.jl"))
global sim_param_closure = setup_sim_param_dr25binrates()
sim_param_closure = set_test_param(sim_param_closure)
df_koi,usable_koi = read_koi_catalog(sim_param_closure)
println("# Finished reading in KOI data")
df_star = setup_star_table_dr25(sim_param_closure)
println("# Finished reading in stellar data")
cat_obs = setup_actual_planet_candidate_catalog(df_star, df_koi, usable_koi, sim_param_closure)
#@time inv_det_simp_bayes(cat_obs, sim_param_closure)
@time simp_bayes(cat_obs, sim_param_closure)
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 4332 | ## ExoplanetsSysSim/examples/dr25_gaia_m/abc_run.jl
## (c) 2019 Danley C. Hsu & Eric B. Ford
# Script for producing DR25 M planet candidate occurrence rate estimates
include("abc_setup.jl")
#using SysSimABC
using ExoplanetsSysSim
using JLD
using StatsBase
out2txt = false # Write occurrence rates & densities to text files
expandpart = false # Expand final generation for robust posteriors
prior_choice = "dirichlet"
bin_size_factor = 2.0
println("Setting up simulation...")
@time abc_plan = SysSimABC.setup_abc(prior_choice = prior_choice, bin_size_factor = bin_size_factor)
println("")
println("Running simulation...")
@time output = SysSimABC.run_abc(abc_plan)
# println("")
# println("Running simulation (part 2)...")
# @time abc_plan = SysSimABC.setup_abc_p2(abc_plan)
# @time output = SysSimABC.run_abc(abc_plan, output)
#@time abc_plan = change_distance()
#@time output = run_abc(abc_plan, output)
println("")
save(string("test-pop-out.jld"), "output", output, "ss_true", EvalSysSimModel.get_ss_obs())
if expandpart
println("Expanding to large generation...")
@time theta_largegen, weights_largegen = SysSimABC.run_abc_largegen(abc_plan, output, EvalSysSimModel.get_ss_obs(), output.accept_log.epsilon[end-1], npart=1000)
println("")
save(string("test-pop-out.jld"), "output", output, "ss_true", EvalSysSimModel.get_ss_obs(), "theta_largegen", theta_largegen, "weights_largegen", weights_largegen)
end
if out2txt
file_rate = open("rate_output.txt", "w")
file_dens = open("dens_output.txt", "w")
end
limitP = get_any(EvalSysSimModel.sim_param_closure, "p_lim_arr", Array{Float64,1})
limitR = get_any(EvalSysSimModel.sim_param_closure, "r_lim_arr", Array{Float64,1})
const r_dim = length(limitR)-1
if expandpart
weight_vec = aweights(weights_largegen)
#weight_vec = aweights(fill(1.0, length(weights_largegen)))
else
weight_vec = aweights(output.weights)
#weight_vec = aweights(fill(1.0, length(output.weights)))
end
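# Hedged sketch of the parameter layout assumed by the loop below for the
# "dirichlet" prior with r_dim > 1: each period column stores one total rate
# followed by r_dim radius fractions (helper names are illustrative additions).
dirichlet_col_index(p_ind::Int, r_dim::Int) = (p_ind - 1)*(r_dim + 1) + 1 # column total rate
dirichlet_bin_index(p_ind::Int, r_ind::Int, r_dim::Int) = (p_ind - 1)*(r_dim + 1) + r_ind + 1 # radius fraction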
for p_ind = 1:(length(limitP)-1)
col_ind = (p_ind-1)*(r_dim)+1
for r_ind = 1:r_dim
bin_ind = (p_ind-1)*(r_dim)+r_ind
dens_denom = 1.0/log(limitP[p_ind+1]/limitP[p_ind])/log(limitR[r_ind+1]/limitR[r_ind])
if prior_choice == "dirichlet" && r_dim > 1
col_ind = (p_ind-1)*(r_dim+1)+1
bin_ind = (p_ind-1)*(r_dim+1)+r_ind+1
if expandpart
quant_arr = quantile(theta_largegen[bin_ind,:].*theta_largegen[col_ind,:], weight_vec, [0.1587, 0.5, 0.8413])
else
quant_arr = quantile(output.theta[bin_ind,:].*output.theta[col_ind,:], weight_vec, [0.1587, 0.5, 0.8413])
end
elseif prior_choice == "beta"
col_lambda = bin_size_factor * 3 * log(limitP[p_ind+1]/limitP[p_ind])/log(2)
if expandpart
quant_arr = quantile(theta_largegen[bin_ind,:]*col_lambda, weight_vec, [0.1587, 0.5, 0.8413])
else
quant_arr = quantile(output.theta[bin_ind,:]*col_lambda, weight_vec, [0.1587, 0.5, 0.8413])
end
else
if expandpart
quant_arr = quantile(theta_largegen[bin_ind,:], weight_vec, [0.1587, 0.5, 0.8413])
else
quant_arr = quantile(output.theta[bin_ind,:], weight_vec, [0.1587, 0.5, 0.8413])
end
end
println("-----------------------------")
println("Orbital Period (day) = ", string(limitP[p_ind:p_ind+1]), " / Planet Radius (R_earth) = ", string(limitR[r_ind:r_ind+1]/ExoplanetsSysSim.earth_radius))
println("")
println("Rate = ", string(quant_arr[2], " + ", quant_arr[3]-quant_arr[2], " - ", quant_arr[2]-quant_arr[1]))
println("Density = ", string(quant_arr[2]*dens_denom, " + ", (quant_arr[3]-quant_arr[2])*dens_denom, " - ", (quant_arr[2]-quant_arr[1])*dens_denom))
if out2txt
write(file_rate, string(quant_arr[2], " + ", quant_arr[3]-quant_arr[2], " - ", quant_arr[2]-quant_arr[1], "\n"))
write(file_dens, string(quant_arr[2]*dens_denom, " + ", (quant_arr[3]-quant_arr[2])*dens_denom, " - ", (quant_arr[2]-quant_arr[1])*dens_denom, "\n"))
end
end
end
if out2txt
close(file_rate)
close(file_dens)
end
println("-----------------------------")
println("")
println(EvalSysSimModel.get_ss_obs())
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 12177 | ## ExoplanetsSysSim/examples/dr25_gaia_fgk/abc_setup.jl
## (c) 2019 Eric B. Ford & Danley C. Hsu
# Collection of functions which specific ABC simulation parameters
module EvalSysSimModel
export setup, get_param_vector, get_ss_obs
export gen_data, calc_summary_stats, calc_distance, is_valid_uniform, is_valid_beta, is_valid_dirichlet, normalize_dirch
using ExoplanetsSysSim
using ApproximateBayesianComputing
const ABC = ApproximateBayesianComputing
include(joinpath(abspath(joinpath(dirname(Base.find_package("ExoplanetsSysSim")),"..")),"examples","dr25_gaia_m", "dr25_binrates_func.jl"))
sim_param_closure = SimParam()
summary_stat_ref_closure = CatalogSummaryStatistics()
function is_valid_uniform(param_vector::Vector{Float64})
global sim_param_closure
update_sim_param_from_vector!(param_vector,sim_param_closure)
local rate_tab::Array{Float64,2} = get_any(sim_param_closure, "obs_par", Array{Float64,2})
limitP::Array{Float64,1} = get_any(sim_param_closure, "p_lim_arr", Array{Float64,1})
#const lambda = sum_kbn(rate_tab)
if any(x -> x < 0., rate_tab) || any([floor(3*log(limitP[i+1]/limitP[i])/log(2)) for i in 1:length(limitP)-1] .< sum(rate_tab, dims=1)')
return false
end
return true
end
function is_valid_dirichlet(param_vector::Vector{Float64})
global sim_param_closure
update_sim_param_from_vector!(param_vector,sim_param_closure)
local rate_tab::Array{Float64,2} = get_any(sim_param_closure, "obs_par", Array{Float64,2})
limitP::Array{Float64,1} = get_any(EvalSysSimModel.sim_param_closure, "p_lim_arr", Array{Float64,1})
#const lambda = sum_kbn(rate_tab)
if any(x -> x < 0., rate_tab) || any([floor(3*log(limitP[i+1]/limitP[i])/log(2)) for i in 1:length(limitP)-1] .< rate_tab[1,:])
return false
end
return true
end
function normalize_dirch(param_vector::Vector{Float64})
global sim_param_closure
local p_dim = length(get_any(sim_param_closure, "p_lim_arr", Array{Float64,1}))-1
local r_dim = length(get_any(sim_param_closure, "r_lim_arr", Array{Float64,1}))-1
for i in 1:p_dim
param_vector[((i-1)*(r_dim+1)+2):((i-1)*(r_dim+1)+(r_dim+1))] ./= sum(param_vector[((i-1)*(r_dim+1)+2):((i-1)*(r_dim+1)+(r_dim+1))])
end
update_sim_param_from_vector!(param_vector,sim_param_closure)
return param_vector
end
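# Hedged, side-effect-free sketch of the normalization above (the example_
# name is an illustrative addition), shown on a standalone vector with layout
# [lambda_1, f_11..f_1r, lambda_2, f_21..f_2r, ...]: each column's fractions
# are rescaled to sum to one while the leading rates are untouched.
function example_normalize_dirch(v::Vector{Float64}, p_dim::Int, r_dim::Int)
    v = copy(v)
    for i in 1:p_dim
        block = ((i-1)*(r_dim+1)+2):((i-1)*(r_dim+1)+(r_dim+1))
        v[block] ./= sum(v[block])
    end
    return v
end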
function is_valid_mfgk_ratio(param_vector::Vector{Float64})
global sim_param_closure
update_sim_param_from_vector!(param_vector,sim_param_closure)
limitP::Array{Float64,1} = get_any(sim_param_closure, "p_lim_arr", Array{Float64,1})
rate_tab::Array{Float64,2} = get_any(sim_param_closure, "obs_par", Array{Float64,2})[:,1:length(limitP)]
local mfgk_ratio::Float64 = get_real(sim_param_closure, "mfgk_ratio") # local, not const: Julia disallows const declarations in function scope
#const lambda = sum(rate_tab)
if any(x -> x < 0., rate_tab) || any([floor(3*log(limitP[i+1]/limitP[i])/log(2)) for i in 1:length(limitP)-1] .< mfgk_ratio*sum(rate_tab, dims=1)')
return false
end
return true
end
function gen_data(param_vector::Vector{Float64})
global sim_param_closure
update_sim_param_from_vector!(param_vector,sim_param_closure)
cat_phys = generate_kepler_physical_catalog(sim_param_closure)
#cat_phys_cut = ExoplanetsSysSim.generate_obs_targets(cat_phys, sim_param_closure)
#cat_obs = ExoplanetsSysSim.observe_kepler_targets_single_obs(cat_phys_cut, sim_param_closure)
cat_obs = ExoplanetsSysSim.observe_kepler_targets_sky_avg(cat_phys, sim_param_closure)
return cat_obs
end
# TODO OPT: Eventually, could adapt ABC.jl to use distance from first pass to decide if should compute additional summary statistics
function calc_summary_stats(cat::KeplerObsCatalog)
global sim_param_closure
sum_stat = calc_summary_stats_obs_binned_rates(cat, sim_param_closure, obs_skyavg = true)
return sum_stat
end
function calc_distance(sum_stat_obs::CatalogSummaryStatistics,sum_stat_sim::CatalogSummaryStatistics, n::Integer = 0)
global sim_param_closure
dist1 = calc_distance_vector_binned(sum_stat_obs,sum_stat_sim, 1, sim_param_closure)
num_available = length(dist1)
num_to_use = n>0 ? min(n,num_available) : num_available
return calc_scalar_distance(dist1[1:num_to_use])
end
function setup(prior_choice::String, bin_size_factor::Float64)
global sim_param_closure = setup_sim_param_dr25binrates()
add_param_fixed(sim_param_closure,"bin_size_factor",bin_size_factor)
if prior_choice == "dirichlet"
sim_param_closure = set_test_param_total(sim_param_closure)
add_param_fixed(sim_param_closure,"generate_num_planets",generate_num_planets_binrates_dirichlet)
if (length(get_any(sim_param_closure, "r_lim_arr", Array{Float64,1}))-1) > 1
add_param_fixed(sim_param_closure,"generate_period_and_sizes", generate_period_and_sizes_binrates_dirichlet)
end
elseif prior_choice == "beta"
sim_param_closure = set_test_param(sim_param_closure)
add_param_fixed(sim_param_closure,"generate_num_planets",generate_num_planets_binrates_beta)
add_param_fixed(sim_param_closure,"generate_period_and_sizes", generate_period_and_sizes_binrates_beta)
elseif prior_choice == "uniform"
sim_param_closure = set_test_param(sim_param_closure)
else
println("# Invalid prior given!")
exit()
end
### Use simulated planet candidate catalog data
# df_star = setup_star_table_christiansen(sim_param_closure)
# println("# Finished reading in stellar data")
# add_param_fixed(sim_param_closure,"num_kepler_targets",1000000) # For "observed" catalog
# cat_obs = simulated_read_kepler_observations(sim_param_closure)
# println("# Finished setting up simulated true catalog")
###
### Use real planet candidate catalog data
df_koi,usable_koi = read_koi_catalog(sim_param_closure)
println("# Finished reading in KOI data")
df_star = setup_star_table_dr25(sim_param_closure)
println("# Finished reading in stellar data")
cat_obs = setup_actual_planet_candidate_catalog(df_star, df_koi, usable_koi, sim_param_closure)
println("# Finished setting up true catalog")
###
global summary_stat_ref_closure = calc_summary_stats_obs_binned_rates(cat_obs,sim_param_closure, trueobs_cat = true)
end
get_param_vector() = make_vector_of_sim_param(sim_param_closure)
get_ss_obs() = summary_stat_ref_closure
function set_simparam_ss(sim_param::ExoplanetsSysSim.SimParam, ss_true::ExoplanetsSysSim.CatalogSummaryStatistics)
global sim_param_closure = sim_param
global summary_stat_ref_closure = ss_true
end
end # module EvalSysSimModel
#include(joinpath(Pkg.dir("ABC"),"src/composite.jl"))
module SysSimABC
export setup_abc, run_abc, run_abc_largegen, setup_abc_p2
using Distributions, Random, Distributed
using ApproximateBayesianComputing
const ABC = ApproximateBayesianComputing
import ApproximateBayesianComputing.CompositeDistributions.CompositeDist
import ApproximateBayesianComputing.TransformedBetaDistributions.LinearTransformedBeta
#using Compat
import ExoplanetsSysSim
import ..EvalSysSimModel
include(joinpath(abspath(joinpath(dirname(Base.find_package("ExoplanetsSysSim")),"..")),"examples","dr25_gaia_m", "dr25_binrates_func.jl"))
include(joinpath(abspath(joinpath(dirname(Base.find_package("ExoplanetsSysSim")),"..")),"examples","dr25_gaia_m", "beta_proposal.jl"))
function setup_abc(num_dist::Integer = 0; prior_choice::String = "uniform", bin_size_factor::Float64 = 1.5)
EvalSysSimModel.setup(prior_choice, bin_size_factor)
limitP::Array{Float64,1} = get_any(EvalSysSimModel.sim_param_closure, "p_lim_arr", Array{Float64,1})
limitR::Array{Float64,1} = get_any(EvalSysSimModel.sim_param_closure, "r_lim_arr", Array{Float64,1})
limitR_full::Array{Float64,1} = get_any(EvalSysSimModel.sim_param_closure, "r_lim_full", Array{Float64,1})
local r_dim = length(limitR)-1
prior_arr = ContinuousDistribution[]
ss_obs_table = EvalSysSimModel.get_ss_obs().stat["planets table"]
if prior_choice == "ratio"
prior_arr = vcat(prior_arr, Uniform(0.0, 10.0))
elseif prior_choice == "dirichlet"
weights_arr = [log(limitR[j+1]/limitR[j]) for j in 1:r_dim]/minimum([log(limitR_full[k+1]/limitR_full[k]) for k in 1:(length(limitR_full)-1)])
for i in 1:(length(limitP)-1)
max_in_col = 3*log(limitP[i+1]/limitP[i])/log(2)
lambda_col = Uniform(0.0, max_in_col)
prior_arr = vcat(prior_arr, lambda_col)
if r_dim > 1
dirch_dist = Dirichlet(weights_arr)
prior_arr = vcat(prior_arr, dirch_dist)
end
end
else
for i in 1:(length(limitP)-1)
max_in_col = bin_size_factor*log(limitP[i+1]/limitP[i])/log(2)
for j in 1:r_dim
uniform_dist = Uniform(0.0, max_in_col*log(limitR[j+1]/limitR[j])/log(2))
prior_arr = vcat(prior_arr, uniform_dist)
end
end
end
param_prior = CompositeDist(prior_arr)
in_parallel = nworkers() > 1
calc_distance_ltd(sum_stat_obs::ExoplanetsSysSim.CatalogSummaryStatistics,sum_stat_sim::ExoplanetsSysSim.CatalogSummaryStatistics) = EvalSysSimModel.calc_distance(sum_stat_obs,sum_stat_sim,num_dist)
global abc_plan = ABC.abc_pmc_plan_type(EvalSysSimModel.gen_data,EvalSysSimModel.calc_summary_stats, calc_distance_ltd, param_prior, make_proposal_dist=make_proposal_dist_multidim_beta, is_valid=EvalSysSimModel.is_valid_uniform, num_part=500, num_max_attempt=200, num_max_times=200, epsilon_init=9.9e99, target_epsilon=1.0e-100, in_parallel=in_parallel, adaptive_quantiles = false, epsilon_reduction_factor=0.9, tau_factor=2.0);
if prior_choice == "ratio"
abc_plan.make_proposal_dist = make_proposal_dist_multidim_beta_ratio
abc_plan.is_valid = EvalSysSimModel.is_valid_mfgk_ratio
elseif prior_choice == "dirichlet" && r_dim > 1
abc_plan.make_proposal_dist = make_proposal_dist_multidim_beta_dirichlet
abc_plan.is_valid = EvalSysSimModel.is_valid_dirichlet
abc_plan.normalize = EvalSysSimModel.normalize_dirch
end
return abc_plan
end
# function setup_abc_p2(abc_plan::ABC.abc_pmc_plan_type)
# abc_plan.tau_factor = 2.0
# abc_plan.num_max_times = 200
# ExoplanetsSysSim.add_param_fixed(EvalSysSimModel.sim_param_closure,"num_targets_sim_pass_one",10000)
# return abc_plan
# end
function run_abc_largegen(abc_plan::ABC.abc_pmc_plan_type, pop::ABC.abc_population_type, ss_true::ExoplanetsSysSim.CatalogSummaryStatistics, epshist_targ::Float64; npart::Integer = 1000, num_dist::Integer = 0)
abc_plan.num_max_times = 1
println("# run_abc_largegen: ",EvalSysSimModel.sim_param_closure)
sampler_largegen = abc_plan.make_proposal_dist(pop, abc_plan.tau_factor)
theta_largegen = Array{Float64}(undef, size(pop.theta, 1), npart)
weight_largegen = Array{Float64}(undef, npart)
for i in 1:npart
theta_val, dist_largegen, attempts_largegen = ABC.generate_theta(abc_plan, sampler_largegen, ss_true, epshist_targ)
theta_largegen[:,i] = theta_val
prior_logpdf = Distributions.logpdf(abc_plan.prior,theta_val)
sampler_logpdf = Distributions.logpdf(sampler_largegen, theta_val)
weight_largegen[i] = exp(prior_logpdf-sampler_logpdf)
end
return theta_largegen, weight_largegen
end
function run_abc(abc_plan::ABC.abc_pmc_plan_type)
#global sim_param_closure
println("# run_abc: ",EvalSysSimModel.sim_param_closure)
ss_true = EvalSysSimModel.get_ss_obs()
#println("True catalog SS: ", ss_true)
pop_out = ABC.run_abc(abc_plan,ss_true;verbose=true)
end
function run_abc(abc_plan::ABC.abc_pmc_plan_type, pop::ABC.abc_population_type)
#global sim_param_closure
dist_threshold = maximum(pop.dist)
EvalSysSimModel.add_param_fixed(EvalSysSimModel.sim_param_closure,"minimum ABC dist skip pass 2",dist_threshold)
println("# run_abc: ",EvalSysSimModel.sim_param_closure)
ss_true = EvalSysSimModel.get_ss_obs()
pop_out = ABC.run_abc(abc_plan,ss_true,pop;verbose=true)
end
end # module SysSimABC
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 10443 | using ExoplanetsSysSim
using ApproximateBayesianComputing
const ABC = ApproximateBayesianComputing
using SpecialFunctions
using Statistics
import ApproximateBayesianComputing.CompositeDistributions.CompositeDist
import ApproximateBayesianComputing.TransformedBetaDistributions.LinearTransformedBeta
#import EvalSysSimModel
# https://en.wikipedia.org/wiki/Trigamma_function
function trigamma_x_gr_4(x::T) where T<: Real
1/x + 0.5/x^2 + 1/(6*x^3) - 1/(30*x^5) + 1/(42*x^7) - 1/(30*x^9) + 5/(66*x^11) - 691/(2730*x^13) + 7/(6*x^15)
end
function trigamma_x_lt_4(x::T) where T<: Real
n = floor(Int64,5-x)
z = x+n
val = trigamma_x_gr_4(z)
for i in 1:n
z -= 1
val += 1/z^2
end
val
end
function trigamma(x::T) where T<: Real
x >= 4 ? trigamma_x_gr_4(x) : trigamma_x_lt_4(x)
end
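# Hedged sanity checks for the series above (the example_ name is an
# illustrative addition): trigamma(1) = pi^2/6, and the recurrence
# trigamma(x) = trigamma(x+1) + 1/x^2 holds by construction.
function example_trigamma_checks()
    @assert isapprox(trigamma(1.0), pi^2/6, rtol=1e-10)
    @assert isapprox(trigamma(2.5), trigamma(3.5) + 1/2.5^2, rtol=1e-12)
    return true
end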
function var_weighted(x::AbstractArray{Float64,1}, w::AbstractArray{Float64,1} )
#println("# size(x) = ",size(x), " size(w) = ", size(w)); flush(stdout)
@assert(length(x)==length(w) )
sumw = sum(w)
@assert( sumw > 0. )
if(sumw!= 1.0)
w /= sum(w)
sumw = 1.0
end
sumw2 = sum(w.*w)
xbar = sum(x.*w)
covar = sum((x.-xbar).*(x.-xbar) .* w) * sumw/(sumw*sumw-sumw2)
end
function mom_alpha(x_bar::T, v_bar::T) where T<: Real
x_bar * (((x_bar * (1 - x_bar)) / v_bar) - 1)
end
function mom_beta(x_bar::T, v_bar::T) where T<: Real
(1 - x_bar) * (((x_bar * (1 - x_bar)) / v_bar) - 1)
end
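# Hedged check (the example_ name is an illustrative addition): the
# method-of-moments formulas above invert the Beta mean/variance, e.g. for
# Beta(2,5) with mean 2/7 and variance 10/392 they recover (2.0, 5.0).
function example_beta_mom_roundtrip()
    a, b = 2.0, 5.0
    m = a/(a + b)
    v = a*b/((a + b)^2*(a + b + 1))
    return mom_alpha(m, v), mom_beta(m, v) # ≈ (2.0, 5.0)
end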
# For algorithm, see https://scholarsarchive.byu.edu/cgi/viewcontent.cgi?article=2613&context=etd
function fit_beta_mle(x::AbstractArray{T,1}; tol::T = 1e-6, max_it::Int64 = 10, init_guess::AbstractArray{T,1} = Array{T}(undef,0), w::AbstractArray{T,1} = Array{T}(undef,0), verbose::Bool = false ) where T<: Real
lnxbar = length(w)>1 ? Statistics.mean(log.(x),AnalyticWeights(w)) : Statistics.mean(log.(x))
ln1mxbar = length(w)>1 ? Statistics.mean(log.(1.0.-x),AnalyticWeights(w)) : Statistics.mean(log.(1.0.-x))
function iterate_mle( mle_guess::Vector{T} ) where T<:Real # one Newton step on the Beta log-likelihood
(alpha, beta) = (mle_guess[1], mle_guess[2])
dgab = digamma(alpha+beta)
g1 = dgab - digamma(alpha) + lnxbar
g2 = dgab - digamma(beta) + ln1mxbar
tgab = trigamma(alpha+beta)
G = [dgab-trigamma(alpha) tgab; tgab tgab-trigamma(beta)]
mle_guess -= G \ [g1, g2]
end
local mle_new
# Compute the moments unconditionally so the diagnostic message below can report them
xbar = length(w)>1 ? Statistics.mean(x,AnalyticWeights(w)) : Statistics.mean(x)
vbar = length(w)>1 ? Statistics.varm(x,xbar,AnalyticWeights(w)) : Statistics.varm(x,xbar)
if length(init_guess) != 2
    mle_new = (vbar < xbar*(1.0-xbar)) ? [mom_alpha(xbar, vbar), mom_beta(xbar,vbar)] : ones(T,2)
else
    mle_new = init_guess
end
if verbose
println("it = 0: ", mle_new)
end
if any(mle_new.<=zero(T))
println("# Warning: mean= ", xbar, " var= ",vbar," (alpha,beta)_init= ",mle_new," invalid, reinitializing to (1,1)")
verbose = true
mle_new = ones(T,2)
end
for i in 1:max_it
mle_old = mle_new
mle_new = newton_step( mle_old )
epsilon = maximum(abs.(mle_old.-mle_new))
if verbose
println("# it = ", i, ": ", mle_new, " max(Delta alpha, Delta beta)= ", epsilon)
end
if epsilon < tol
break
end
end
return mle_new
end
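# Usage sketch for fit_beta_mle (demo_fit_beta_mle is illustrative, not in the
# original source; it assumes Beta from Distributions is in scope, as it is
# where make_beta below constructs Beta objects): draws from Beta(2,5) should
# fit back to approximately (2.0, 5.0) within a few Newton steps.
function demo_fit_beta_mle()
x = rand(Beta(2.0, 5.0), 10_000)
fit_beta_mle(x) # expect roughly [2.0, 5.0]
end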
function make_beta(x::AbstractArray{T,1}, w::AbstractArray{T,1};
xmean::T = Statistics.mean(x,AnalyticWeights(w)),
xvar::T = Statistics.varm(x,AnalyticWeights(w),xmean), tau_factor::T=one(T) ) where T<:Real
alpha_beta = (xvar < xmean*(1.0-xmean)) ? [mom_alpha(xmean, xvar), mom_beta(xmean,xvar)] : ones(T,2)
if any(alpha_beta.<=zero(T))
alpha_beta = fit_beta_mle(x, w=w, init_guess=alpha_beta, verbose=true)
end
if any(alpha_beta.<=zero(T))
alpha_beta = ones(T,2)
else
if minimum(alpha_beta)>1.5*tau_factor && sum(alpha_beta)>=20.0*tau_factor
alpha_beta ./= tau_factor
end
end
#println("Radius relative: a= ",alpha_beta[1], " b= ",alpha_beta[2])
Beta(alpha_beta[1], alpha_beta[2])
end
function make_beta_transformed(x::AbstractArray{T,1}, w::AbstractArray{T,1}; xmin::T=zero(T), xmax::T=one(T), xmean::T = Statistics.mean(x,AnalyticWeights(w)), xvar::T = Statistics.varm(x,AnalyticWeights(w),xmean), tau_factor::T=one(T) ) where T<:Real
alpha_beta = (xvar < xmean*(1.0-xmean)) ? [mom_alpha(xmean, xvar), mom_beta(xmean,xvar)] : ones(T,2)
if any(alpha_beta.<=zero(T))
alpha_beta = fit_beta_mle(x, w=w, init_guess=alpha_beta, verbose=true)
end
if any(alpha_beta.<=zero(T))
alpha_beta = ones(T,2)
else
if minimum(alpha_beta)>1.5*tau_factor && sum(alpha_beta)>=20.0*tau_factor
alpha_beta ./= tau_factor
end
end
#println("Total: a= ",alpha_beta[1], " b= ",alpha_beta[2])
LinearTransformedBeta(alpha_beta[1], alpha_beta[2], xmin=xmin, xmax=xmax)
end
function make_proposal_dist_multidim_beta(theta::AbstractArray{Float64,2}, weights::AbstractArray{Float64,1}, tau_factor::Float64; verbose::Bool = false)
global sim_param_closure
local limitP::Array{Float64,1} = get_any(EvalSysSimModel.sim_param_closure, "p_lim_arr", Array{Float64,1})
local p_dim = length(limitP)-1
local r_dim = length(get_any(EvalSysSimModel.sim_param_closure, "r_lim_arr", Array{Float64,1}))-1
theta_mean = sum(theta.*weights',dims=2) # weighted mean for parameters
theta_var = ABC.var_weighted(theta'.-theta_mean',weights) # scaled, weighted covar for parameters
tau_factor_indiv = fill(tau_factor,length(theta_var))
dist_arr = ContinuousDistribution[]
for j in 1:p_dim
max_col_rate = 3*log(limitP[j+1]/limitP[j])/log(2)
col_startidx = (j-1)*r_dim+1
#tau_factor_indiv[col_startidx] = 2.0
#=
println("mean= ",theta_mean)
println("var= ",theta_var)
println("tau= ",tau_factor_indiv)
for i in 1:length(theta_mean)
println("a= ",alpha(theta_mean[i],tau_factor*theta_var[i]), " b= ",beta(theta_mean[i],tau_factor*theta_var[i]))
end
=#
dist_arr = vcat(dist_arr, ContinuousDistribution[make_beta_transformed(theta[i,:], weights, xmin=0.0, xmax=max_col_rate, xmean=theta_mean[i]/max_col_rate, xvar=theta_var[i]/max_col_rate^2, tau_factor=tau_factor_indiv[i]) for i in (col_startidx):(col_startidx+r_dim-1)])
end
dist = CompositeDist(dist_arr)
end
function make_proposal_dist_multidim_beta(pop::abc_population_type, tau_factor::Float64; verbose::Bool = false)
make_proposal_dist_multidim_beta(pop.theta, pop.weights, tau_factor, verbose=verbose)
end
function make_proposal_dist_multidim_beta_dirichlet(theta::AbstractArray{Float64,2}, weights::AbstractArray{Float64,1}, tau_factor::Float64; verbose::Bool = false)
global sim_param_closure
local limitP::Array{Float64,1} = get_any(EvalSysSimModel.sim_param_closure, "p_lim_arr", Array{Float64,1})
local p_dim = length(limitP)-1
local r_dim = length(get_any(EvalSysSimModel.sim_param_closure, "r_lim_arr", Array{Float64,1}))-1
theta_mean = sum(theta.*weights',dims=2) # weighted mean for parameters
theta_var = ABC.var_weighted(theta'.-theta_mean',weights) # scaled, weighted covar for parameters
tau_factor_indiv = fill(tau_factor,length(theta_var))
dist_arr = ContinuousDistribution[]
for j in 1:p_dim
max_col_rate = 3*log(limitP[j+1]/limitP[j])/log(2)
col_startidx = (j-1)*(r_dim+1)+1
#tau_factor_indiv[col_startidx] = 2.0
# if verbose
# println("total: ",theta_mean[col_startidx]," ",theta_var[col_startidx])
# end
# for i in (col_startidx+1):(col_startidx+r_dim)
# mean_ratio = sum(theta[col_startidx,:].*theta[i,:].*weights) /(theta_mean[col_startidx]*theta_mean[i]) # weighted mean for parameters
# var_ratio = var_weighted(vec(theta[col_startidx,:].*theta[i,:]).-(theta_mean[col_startidx]*theta_mean[i]),weights)/(2 * theta_mean[col_startidx] * theta_var[i]) # scaled, weighted covar for parameters
# if verbose
# println("i=",i,": ",theta_mean[i]," ",theta_var[i]," ratios: ",mean_ratio, " ",var_ratio)
# end
# var_ratio = var_ratio >= one(var_ratio) ? var_ratio : one(var_ratio)
# tau_factor_indiv[i] = tau_factor*var_ratio
# end
# if verbose
# flush(stdout)
# end
#=
println("mean= ",theta_mean)
println("var= ",theta_var)
println("tau= ",tau_factor_indiv)
for i in 1:length(theta_mean)
println("a= ",alpha(theta_mean[i],tau_factor*theta_var[i]), " b= ",beta(theta_mean[i],tau_factor*theta_var[i]))
end
=#
dist_arr = vcat(dist_arr, make_beta_transformed(theta[col_startidx,:], weights, xmin=0.0, xmax=max_col_rate, xmean=theta_mean[col_startidx]/max_col_rate, xvar=theta_var[col_startidx]/max_col_rate^2, tau_factor=tau_factor_indiv[col_startidx]), ContinuousDistribution[ make_beta(theta[i,:], weights, xmean=theta_mean[i], xvar=theta_var[i], tau_factor=tau_factor_indiv[i]) for i in (col_startidx+1):(col_startidx+r_dim)])
end
dist = CompositeDist(dist_arr)
end
function make_proposal_dist_multidim_beta_dirichlet(pop::abc_population_type, tau_factor::Float64; verbose::Bool = false)
make_proposal_dist_multidim_beta_dirichlet(pop.theta, pop.weights, tau_factor, verbose=verbose)
end
function make_proposal_dist_multidim_beta_ratio(theta::AbstractArray{Float64,2}, weights::AbstractArray{Float64,1}, tau_factor::Float64; verbose::Bool = false)
global sim_param_closure
theta_mean = sum(theta.*weights',dims=2) # weighted mean for parameters
theta_var = ABC.var_weighted(theta'.-theta_mean',weights) # scaled, weighted covar for parameters
prior_max = 15.0
dist_arr = ContinuousDistribution[make_beta_transformed(theta[1,:], weights, xmin=0.0, xmax=prior_max, xmean=theta_mean[1]/prior_max, xvar=theta_var[1]/prior_max^2, tau_factor=tau_factor)]
dist = CompositeDist(dist_arr)
end
function make_proposal_dist_multidim_beta_ratio(pop::abc_population_type, tau_factor::Float64; verbose::Bool = false)
make_proposal_dist_multidim_beta_ratio(pop.theta, pop.weights, tau_factor, verbose=verbose)
end
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 54947 | ## ExoplanetsSysSim/examples/dr25_gaia_m/dr25_binrates_func.jl
## (c) 2019 Danley C. Hsu & Eric B. Ford
# Collection of functions specific to estimating DR25
# planet candidate occurrence rates over a 2D period-radius grid
using ExoplanetsSysSim
using StatsFuns
using JLD
using CSV
using DataFrames
using Distributions
## simulation_parameters
macro isdefinedlocal(var)
quote
try
$(esc(var))
true
catch err
isa(err, UndefVarError) ? false : rethrow(err)
end
end
end
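# Usage sketch for the macro above (demo_isdefinedlocal is illustrative, not in
# the original source): the macro evaluates to true when the name is bound and
# to false only when evaluating it throws an UndefVarError.
function demo_isdefinedlocal()
y = 1
(@isdefinedlocal y), (@isdefinedlocal some_undefined_name) # == (true, false)
end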
function setup_sim_param_dr25binrates(args::Vector{String} = String[] ) # allow this to take a list of parameter (e.g., from command line)
sim_param = ExoplanetsSysSim.SimParam()
add_param_fixed(sim_param,"max_tranets_in_sys",7)
add_param_fixed(sim_param,"generate_star",ExoplanetsSysSim.generate_star_dumb)
add_param_fixed(sim_param,"generate_planetary_system", ExoplanetsSysSim.generate_planetary_system_uncorrelated_incl)
add_param_fixed(sim_param,"generate_kepler_target",ExoplanetsSysSim.generate_kepler_target_from_table)
add_param_fixed(sim_param,"star_table_setup",setup_star_table_dr25)
add_param_fixed(sim_param,"stellar_catalog","q1q17_dr25_gaia_m.jld")
add_param_fixed(sim_param,"osd_file","dr25m_osds.jld")
add_param_fixed(sim_param,"generate_num_planets",generate_num_planets_binrates_uniform)
add_param_fixed(sim_param,"generate_planet_mass_from_radius",ExoplanetsSysSim.generate_planet_mass_from_radius_powerlaw)
add_param_fixed(sim_param,"vetting_efficiency",ExoplanetsSysSim.vetting_efficiency_none)
add_param_fixed(sim_param,"mr_power_index",2.0)
add_param_fixed(sim_param,"mr_const",1.0)
add_param_fixed(sim_param,"generate_period_and_sizes", generate_period_and_sizes_binrates_uniform)
add_param_fixed(sim_param,"p_lim_full",[0.5, 1., 2., 4., 8., 16., 32., 64., 128., 256., 500.])
add_param_fixed(sim_param,"r_lim_full",[0.25, 0.5, 0.75, 1., 1.25, 1.5, 1.75, 2., 2.5, 3., 4., 6., 8., 12., 16.]*ExoplanetsSysSim.earth_radius)
#p_dim = length(p_lim_arr_num)-1
#r_dim = length(r_lim_arr_num)-1
#rate_tab_init = reshape(fill(1.0, p_dim*r_dim)*0.01,(r_dim,p_dim))
#add_param_fixed(sim_param, "p_lim_arr", p_lim_arr_num)
#add_param_fixed(sim_param, "r_lim_arr", r_lim_arr_num*ExoplanetsSysSim.earth_radius)
#add_param_active(sim_param,"obs_par", rate_tab_init)
add_param_fixed(sim_param,"generate_e_omega",ExoplanetsSysSim.generate_e_omega_rayleigh)
add_param_fixed(sim_param,"sigma_hk",0.03)
add_param_fixed(sim_param,"sigma_incl",2.0) # degrees
add_param_fixed(sim_param,"calc_target_obs_sky_ave",ExoplanetsSysSim.calc_target_obs_sky_ave)
add_param_fixed(sim_param,"calc_target_obs_single_obs",ExoplanetsSysSim.calc_target_obs_single_obs)
add_param_fixed(sim_param,"transit_noise_model",ExoplanetsSysSim.transit_noise_model_diagonal)
return sim_param
end
function set_test_param(sim_param_closure::SimParam)
@eval(include(joinpath(pwd(),"param.in")))
if @isdefinedlocal(stellar_catalog)
@assert (typeof(stellar_catalog) == String)
add_param_fixed(sim_param_closure,"stellar_catalog",stellar_catalog)
end
if @isdefinedlocal(koi_catalog)
@assert (typeof(koi_catalog) == String)
add_param_fixed(sim_param_closure,"koi_catalog",koi_catalog)
end
if @isdefinedlocal(num_targ_sim)
@assert (typeof(num_targ_sim) == Int)
add_param_fixed(sim_param_closure,"num_targets_sim_pass_one",num_targ_sim)
end
if @isdefinedlocal(osd_file)
@assert (typeof(osd_file) == String)
add_param_fixed(sim_param_closure,"osd_file",osd_file)
end
@assert (typeof(p_bin_lim) == Array{Float64,1})
add_param_fixed(sim_param_closure, "p_lim_arr", p_bin_lim)
@assert (typeof(r_bin_lim) == Array{Float64,1})
add_param_fixed(sim_param_closure, "r_lim_arr", r_bin_lim*ExoplanetsSysSim.earth_radius)
p_dim = length(get_any(sim_param_closure, "p_lim_arr", Array{Float64,1}))-1
r_dim = length(get_any(sim_param_closure, "r_lim_arr", Array{Float64,1}))-1
n_bin = p_dim*r_dim
if @isdefinedlocal(rate_init)
if typeof(rate_init) <: Real
@assert (rate_init >= 0.0)
rate_init_list = fill(rate_init, n_bin)
else
rate_init_list = rate_init
end
@assert (ndims(rate_init_list) <= 2)
if ndims(rate_init_list) == 1
@assert (length(rate_init_list) == n_bin)
rate_tab_init = reshape(rate_init_list*0.01, (r_dim, p_dim))
else
@assert (size(rate_init_list) == (r_dim, p_dim))
rate_tab_init = rate_init_list*0.01
end
add_param_active(sim_param_closure, "obs_par", rate_tab_init)
else
rate_init_list = fill(1.0, n_bin)
rate_tab_init = reshape(rate_init_list*0.01, (r_dim, p_dim))
add_param_active(sim_param_closure, "obs_par", rate_tab_init)
end
return sim_param_closure
end
function set_test_param_total(sim_param_closure::SimParam)
@eval(include(joinpath(pwd(),"param.in")))
if @isdefinedlocal(stellar_catalog)
@assert (typeof(stellar_catalog) == String)
add_param_fixed(sim_param_closure,"stellar_catalog",stellar_catalog)
end
if @isdefinedlocal(koi_catalog)
@assert (typeof(koi_catalog) == String)
add_param_fixed(sim_param_closure,"koi_catalog",koi_catalog)
end
if @isdefinedlocal(num_targ_sim)
@assert (typeof(num_targ_sim) == Int)
add_param_fixed(sim_param_closure,"num_targets_sim_pass_one",num_targ_sim)
end
if @isdefinedlocal(osd_file)
@assert (typeof(osd_file) == String)
add_param_fixed(sim_param_closure,"osd_file",osd_file)
end
@assert (typeof(p_bin_lim) == Array{Float64,1})
add_param_fixed(sim_param_closure, "p_lim_arr", p_bin_lim)
@assert (typeof(r_bin_lim) == Array{Float64,1})
add_param_fixed(sim_param_closure, "r_lim_arr", r_bin_lim*ExoplanetsSysSim.earth_radius)
p_dim = length(get_any(sim_param_closure, "p_lim_arr", Array{Float64,1}))-1
r_dim = length(get_any(sim_param_closure, "r_lim_arr", Array{Float64,1}))-1
n_bin = p_dim*r_dim
if @isdefinedlocal(rate_init)
if typeof(rate_init) <: Real
@assert (rate_init >= 0.0)
rate_init_list = fill(rate_init, n_bin)
else
rate_init_list = rate_init
end
@assert (ndims(rate_init_list) <= 2)
if ndims(rate_init_list) == 1
@assert (length(rate_init_list) == n_bin)
rate_tab_init = reshape(rate_init_list*0.01, (r_dim, p_dim))
else
@assert (size(rate_init_list) == (r_dim, p_dim))
rate_tab_init = rate_init_list*0.01
end
if r_dim > 1
lamb_col = sum(rate_tab_init, dims=1)
rate_tab_init = rate_tab_init ./ lamb_col
rate_tab_init = vcat(lamb_col, rate_tab_init)
end
add_param_active(sim_param_closure, "obs_par", rate_tab_init)
else
rate_init_list = fill(1.0, n_bin)
rate_tab_init = reshape(rate_init_list*0.01, (r_dim, p_dim))
if r_dim > 1
lamb_col = sum(rate_tab_init, dims=1)
rate_tab_init = rate_tab_init ./ lamb_col
rate_tab_init = vcat(lamb_col, rate_tab_init)
end
add_param_active(sim_param_closure, "obs_par", rate_tab_init)
end
if r_dim == 1
add_param_fixed(sim_param_closure,"generate_period_and_sizes", generate_period_and_sizes_christiansen_single_rp)
end
return sim_param_closure
end
function set_test_param_ratio(sim_param_closure::SimParam)
@eval(include(joinpath(pwd(),"param.in")))
if @isdefinedlocal(stellar_catalog)
@assert (typeof(stellar_catalog) == String)
add_param_fixed(sim_param_closure,"stellar_catalog",stellar_catalog)
end
if @isdefinedlocal(koi_catalog)
@assert (typeof(koi_catalog) == String)
add_param_fixed(sim_param_closure,"koi_catalog",koi_catalog)
end
if @isdefinedlocal(num_targ_sim)
@assert (typeof(num_targ_sim) == Int)
add_param_fixed(sim_param_closure,"num_targets_sim_pass_one",num_targ_sim)
end
if @isdefinedlocal(osd_file)
@assert (typeof(osd_file) == String)
add_param_fixed(sim_param_closure,"osd_file",osd_file)
end
@assert (typeof(p_bin_lim) == Array{Float64,1})
add_param_fixed(sim_param_closure, "p_lim_arr", p_bin_lim)
@assert (typeof(r_bin_lim) == Array{Float64,1})
add_param_fixed(sim_param_closure, "r_lim_arr", r_bin_lim*ExoplanetsSysSim.earth_radius)
p_dim = length(get_any(sim_param_closure, "p_lim_full", Array{Float64,1}))-1
r_dim = length(get_any(sim_param_closure, "r_lim_full", Array{Float64,1}))-1
n_bin = p_dim*r_dim
if @isdefinedlocal(rate_init)
if typeof(rate_init) <: Real
@assert (rate_init >= 0.0)
rate_init_list = fill(rate_init, n_bin)
else
rate_init_list = rate_init
end
@assert (ndims(rate_init_list) <= 2)
if ndims(rate_init_list) == 1
@assert (length(rate_init_list) == n_bin)
rate_tab_init = reshape(rate_init_list*0.01, (r_dim, p_dim))
else
@assert (size(rate_init_list) == (r_dim, p_dim))
rate_tab_init = rate_init_list*0.01
end
add_param_fixed(sim_param_closure, "obs_par", rate_tab_init)
else
rate_init_list = fill(1.0, n_bin)
rate_tab_init = reshape(rate_init_list*0.01, (r_dim, p_dim))
add_param_fixed(sim_param_closure, "obs_par", rate_tab_init)
end
return sim_param_closure
end
## planetary_system
function draw_uniform_selfavoiding(n::Integer; lower_bound::Real=0.0, upper_bound=1.0, min_separation::Real = 0.05, return_sorted::Bool=false )
@assert(n>=1)
@assert(upper_bound>lower_bound)
@assert(2*min_separation*n<upper_bound-lower_bound)
list = rand(n)
sorted_idx = collect(1:n)
segment_length = upper_bound-lower_bound
list[1] = lower_bound+segment_length*list[1] # First draw is standard uniform
for i in 2:n
segment_length -= min(upper_bound,list[i-1]+min_separation)-max(lower_bound,list[i-1]-min_separation) # Exclude the previous draw's neighborhood from future draws (each draw's exclusion is subtracted exactly once)
list[i] *= segment_length # Draw over reduced range based on which segments need to be excluded
list[i] += lower_bound
j = 1
while j<= i-1 # Checking for conflicts
k = sorted_idx[j] # Going from low to high
if list[i]>list[k]-min_separation # If too close, bump the draw past the excluded region around list[k]
list[i] += min(upper_bound,list[k]+min_separation)-max(lower_bound,list[k]-min_separation)
else
break
end
j += 1
end
for k in i:-1:(j+1) # Keep larger values sorted
sorted_idx[k]=sorted_idx[k-1]
end
sorted_idx[j] = i # Save order for this draw
#segment_length -= min(upper_bound,list[i]+min_separation)-max(lower_bound,list[i]-min_separation) # Reduce length for future draws
end
return return_sorted ? list[sorted_idx] : list
end
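# Usage sketch (demo_draw_selfavoiding is illustrative, not in the original
# source): sorted draws are expected to be pairwise separated by at least
# min_separation.
function demo_draw_selfavoiding()
vals = draw_uniform_selfavoiding(5, min_separation=0.05, lower_bound=0.0, upper_bound=1.0, return_sorted=true)
all(diff(vals) .>= 0.05) # expected to hold
end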
function generate_num_planets_binrates_uniform(s::Star, sim_param::SimParam)
local max_tranets_in_sys::Int64 = get_int(sim_param,"max_tranets_in_sys") # TODO SCI: Is 7 planets max per system OK, even when fitting across potentially 9 period bins?
#local max_tranets_per_P::Int64 = 3 # Set maximum number of planets per period range as loose stability criteria and to prevent near-crossing orbits
rate_tab::Array{Float64,2} = get_any(sim_param, "obs_par", Array{Float64,2})
limitP::Array{Float64,1} = get_any(sim_param, "p_lim_arr", Array{Float64,1})
local p_dim = length(limitP)-1
local r_dim = length(get_any(sim_param, "r_lim_arr", Array{Float64,1}))-1
sum_lambda = 0
for i in 1:p_dim
sum_lambda += ExoplanetsSysSim.generate_num_planets_poisson(sum(rate_tab[:,i]), convert(Int64, floor(3*log(limitP[i+1]/limitP[i])/log(2))))
end
#println("# lambda= ", sum_lambda)
return min(sum_lambda, max_tranets_in_sys)
end
function generate_num_planets_binrates_dirichlet(s::Star, sim_param::SimParam)
local max_tranets_in_sys::Int64 = get_int(sim_param,"max_tranets_in_sys") # TODO SCI: Is 7 planets max per system OK, even when fitting across potentially 9 period bins?
#local max_tranets_per_P::Int64 = 3 # Set maximum number of planets per period range as loose stability criteria and to prevent near-crossing orbits
rate_tab::Array{Float64,2} = get_any(sim_param, "obs_par", Array{Float64,2})
limitP::Array{Float64,1} = get_any(sim_param, "p_lim_arr", Array{Float64,1})
local p_dim = length(limitP)-1
local r_dim = length(get_any(sim_param, "r_lim_arr", Array{Float64,1}))-1
sum_lambda = 0
for i in 1:p_dim
sum_lambda += ExoplanetsSysSim.generate_num_planets_poisson(rate_tab[1,i], convert(Int64, floor(3*log(limitP[i+1]/limitP[i])/log(2))))
end
#println("# lambda= ", sum_lambda)
return min(sum_lambda, max_tranets_in_sys)
end
function generate_num_planets_m_fgk_ratio(s::Star, sim_param::SimParam)
local max_tranets_in_sys::Int64 = get_int(sim_param,"max_tranets_in_sys") # TODO SCI: Is 7 planets max per system OK, even when fitting across potentially 9 period bins?
#local max_tranets_per_P::Int64 = 3 # Set maximum number of planets per period range as loose stability criteria and to prevent near-crossing orbits
rate_tab::Array{Float64,2} = get_any(sim_param, "obs_par", Array{Float64,2})
mfgk_ratio::Float64 = get_real(sim_param, "mfgk_ratio")
limitP::Array{Float64,1} = get_any(sim_param, "p_lim_full", Array{Float64,1})
local p_dim = length(limitP)-1
local r_dim = length(get_any(sim_param, "r_lim_full", Array{Float64,1}))-1
sum_lambda = 0
for i in 1:p_dim
sum_lambda += ExoplanetsSysSim.generate_num_planets_poisson(mfgk_ratio*sum(rate_tab[:,i]), convert(Int64, floor(3*log(limitP[i+1]/limitP[i])/log(2))))
end
#println("# lambda= ", sum_lambda)
return min(sum_lambda, max_tranets_in_sys)
end
function generate_period_and_sizes_binrates_uniform(s::Star, sim_param::SimParam; num_pl::Integer = 1)
rate_tab::Array{Float64,2} = get_any(sim_param, "obs_par", Array{Float64,2})
limitP::Array{Float64,1} = get_any(sim_param, "p_lim_arr", Array{Float64,1})
limitRp::Array{Float64,1} = get_any(sim_param, "r_lim_arr", Array{Float64,1})
local r_dim = length(limitRp)-1
sepa_min = 0.05 # Minimum orbital separation in AU
backup_sepa_factor_slightly_less_than_one = 0.95
@assert ((length(limitP)-1) == size(rate_tab, 2))
@assert ((length(limitRp)-1) == size(rate_tab, 1))
Plist = zeros(num_pl)
Rplist = zeros(num_pl)
rate_tab_1d = reshape(rate_tab,length(rate_tab))
maxcuml = sum(rate_tab_1d)
cuml = cumsum(rate_tab_1d/maxcuml)
# We assume uniform sampling in log P and log Rp within each bin
j_idx = ones(Int64, num_pl)
for n in 1:num_pl
rollp = Base.rand()
idx = findfirst(x -> x > rollp, cuml)
i_idx = (idx-1)%size(rate_tab,1)+1
j_idx[n] = floor(Int64,(idx-1)//size(rate_tab,1))+1
Rplist[n] = exp(Base.rand()*(log(limitRp[i_idx+1])-log(limitRp[i_idx]))+log(limitRp[i_idx]))
end
for j in 1:(length(limitP)-1)
tmp_ind = findall(x -> x == j, j_idx)
if length(tmp_ind) > 0
redraw_att = 0
invalid_config = true
while invalid_config && redraw_att < 20
n_range = length(tmp_ind)
loga_min = log(ExoplanetsSysSim.semimajor_axis(limitP[j], s.mass))
loga_min_ext = log(ExoplanetsSysSim.semimajor_axis(limitP[j], s.mass)+sepa_min) # Used for determining minimum semimajor axis separation
loga_max = log(ExoplanetsSysSim.semimajor_axis(limitP[j+1], s.mass))
logsepa_min = min(loga_min_ext-loga_min, (loga_max-loga_min)/n_range/2*backup_sepa_factor_slightly_less_than_one) # Prevents minimum separations too large
tmp_logalist = draw_uniform_selfavoiding(n_range,min_separation=logsepa_min,lower_bound=loga_min,upper_bound=loga_max)
tmp_Plist = exp.((3*tmp_logalist .- log(s.mass))/2)*ExoplanetsSysSim.day_in_year # Convert from log a (in AU) back to P (in days)
invalid_config = false
redraw_att += 1
for n in 1:n_range
if tmp_Plist[n] < limitP[j] || tmp_Plist[n] > limitP[j+1]
invalid_config = true
else
Plist[tmp_ind[n]] = tmp_Plist[n]
end
end
end
end
end
return Plist, Rplist
end
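# Sketch of the log-uniform in-bin draw used above (demo_loguniform_draw is
# illustrative, not in the original source): pushing a standard uniform u
# through exp(u*(log(hi)-log(lo))+log(lo)) gives a sample uniform in log
# between lo and hi, which is how both periods and radii are placed in bins.
demo_loguniform_draw(lo::Float64, hi::Float64) = exp(Base.rand()*(log(hi)-log(lo))+log(lo))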
function generate_period_and_sizes_binrates_dirichlet(s::Star, sim_param::SimParam; num_pl::Integer = 1)
rate_tab::Array{Float64,2} = get_any(sim_param, "obs_par", Array{Float64,2})
limitP::Array{Float64,1} = get_any(sim_param, "p_lim_arr", Array{Float64,1})
limitRp::Array{Float64,1} = get_any(sim_param, "r_lim_arr", Array{Float64,1})
local r_dim = length(limitRp)-1
sepa_min = 0.05 # Minimum orbital separation in AU
backup_sepa_factor_slightly_less_than_one = 0.95
@assert ((length(limitP)-1) == size(rate_tab, 2))
@assert ((length(limitRp)-1) == (size(rate_tab, 1)-1))
Plist = zeros(num_pl)
Rplist = zeros(num_pl)
maxcuml = sum(rate_tab[1,:])
cuml = cumsum(rate_tab[1,:]/maxcuml)
# We assume uniform sampling in log P and log Rp within each bin
j_idx = ones(Int64, num_pl)
for n in 1:num_pl
rollp = Base.rand()
j_idx[n] = findfirst(x -> x > rollp, cuml)
end
for j in 1:(length(limitP)-1)
tmp_ind = findall(x -> x == j, j_idx)
if length(tmp_ind) > 0
redraw_att = 0
invalid_config = true
while invalid_config && redraw_att < 20
n_range = length(tmp_ind)
loga_min = log(ExoplanetsSysSim.semimajor_axis(limitP[j], s.mass))
loga_min_ext = log(ExoplanetsSysSim.semimajor_axis(limitP[j], s.mass)+sepa_min) # Used for determining minimum semimajor axis separation
loga_max = log(ExoplanetsSysSim.semimajor_axis(limitP[j+1], s.mass))
logsepa_min = min(loga_min_ext-loga_min, (loga_max-loga_min)/n_range/2*backup_sepa_factor_slightly_less_than_one) # Prevents minimum separations too large
tmp_logalist = draw_uniform_selfavoiding(n_range,min_separation=logsepa_min,lower_bound=loga_min,upper_bound=loga_max)
tmp_Plist = exp.((3*tmp_logalist .- log(s.mass))/2)*ExoplanetsSysSim.day_in_year # Convert from log a (in AU) back to P (in days)
rad_dist = Distributions.Categorical(rate_tab[((j-1)*(r_dim+1)+2):((j-1)*(r_dim+1)+(r_dim+1))]) # Distribution for fraction of times the next planet draw would be assigned to a given radius bin
invalid_config = false
redraw_att += 1
for n in 1:n_range
if tmp_Plist[n] < limitP[j] || tmp_Plist[n] > limitP[j+1]
invalid_config = true
else
Plist[tmp_ind[n]] = tmp_Plist[n]
end
i_idx = rand(rad_dist)
Rplist[tmp_ind[n]] = exp(Base.rand()*(log(limitRp[i_idx+1])-log(limitRp[i_idx]))+log(limitRp[i_idx]))
end
end
end
end
return Plist, Rplist
end
function generate_period_and_sizes_m_fgk_ratio(s::Star, sim_param::SimParam; num_pl::Integer = 1)
rate_tab::Array{Float64,2} = get_any(sim_param, "obs_par", Array{Float64,2})
#mfgk_ratio::Float64 = get_real(sim_param, "mfgk_ratio")
limitP::Array{Float64,1} = get_any(sim_param, "p_lim_full", Array{Float64,1})
limitRp::Array{Float64,1} = get_any(sim_param, "r_lim_full", Array{Float64,1})
local r_dim = length(limitRp)-1
sepa_min = 0.05 # Minimum orbital separation in AU
backup_sepa_factor_slightly_less_than_one = 0.95
@assert ((length(limitP)-1) == size(rate_tab, 2))
@assert ((length(limitRp)-1) == size(rate_tab, 1))
Plist = zeros(num_pl)
Rplist = zeros(num_pl)
rate_tab_1d = reshape(rate_tab,length(rate_tab))
maxcuml = sum(rate_tab_1d)
cuml = cumsum(rate_tab_1d/maxcuml)
# We assume uniform sampling in log P and log Rp within each bin
j_idx = ones(Int64, num_pl)
for n in 1:num_pl
rollp = Base.rand()
idx = findfirst(x -> x > rollp, cuml)
i_idx = (idx-1)%size(rate_tab,1)+1
j_idx[n] = floor(Int64,(idx-1)//size(rate_tab,1))+1
Rplist[n] = exp(Base.rand()*(log(limitRp[i_idx+1])-log(limitRp[i_idx]))+log(limitRp[i_idx]))
end
for j in 1:(length(limitP)-1)
tmp_ind = findall(x -> x == j, j_idx)
if length(tmp_ind) > 0
redraw_att = 0
invalid_config = true
while invalid_config && redraw_att < 20
n_range = length(tmp_ind)
loga_min = log(ExoplanetsSysSim.semimajor_axis(limitP[j], s.mass))
loga_min_ext = log(ExoplanetsSysSim.semimajor_axis(limitP[j], s.mass)+sepa_min) # Used for determining minimum semimajor axis separation
loga_max = log(ExoplanetsSysSim.semimajor_axis(limitP[j+1], s.mass))
logsepa_min = min(loga_min_ext-loga_min, (loga_max-loga_min)/n_range/2*backup_sepa_factor_slightly_less_than_one) # Prevents minimum separations too large
tmp_logalist = draw_uniform_selfavoiding(n_range,min_separation=logsepa_min,lower_bound=loga_min,upper_bound=loga_max)
tmp_Plist = exp.((3*tmp_logalist .- log(s.mass))/2)*ExoplanetsSysSim.day_in_year # Convert from log a (in AU) back to P (in days)
invalid_config = false
redraw_att += 1
for n in 1:n_range
if tmp_Plist[n] < limitP[j] || tmp_Plist[n] > limitP[j+1]
invalid_config = true
else
Plist[tmp_ind[n]] = tmp_Plist[n]
end
end
end
end
end
return Plist, Rplist
end
## stellar_table
function setup_dr25(sim_param::SimParam; force_reread::Bool = false)
#global df
wf = WindowFunction.setup_window_function(sim_param)
WindowFunction.setup_OSD_interp(sim_param) #read in osd files so they can be interpolated
df = ExoplanetsSysSim.StellarTable.df
if haskey(sim_param,"read_stellar_catalog") && !force_reread
return df
#return data
end
stellar_catalog_filename = convert(String,joinpath(abspath(joinpath(dirname(Base.find_package("ExoplanetsSysSim")),"..")), "data", convert(String,get(sim_param,"stellar_catalog","q1_q17_dr25_stellar.csv")) ) )
df = setup_dr25(stellar_catalog_filename)
add_param_fixed(sim_param,"read_stellar_catalog",true)
add_param_fixed(sim_param,"num_kepler_targets",StellarTable.num_usable_in_star_table())
if !haskey(sim_param.param,"num_targets_sim_pass_one")
add_param_fixed(sim_param,"num_targets_sim_pass_one", StellarTable.num_usable_in_star_table())
end
StellarTable.set_star_table(df)
return df
end
function setup_dr25(filename::String; force_reread::Bool = false)
#global df, usable
df = ExoplanetsSysSim.StellarTable.df
#usable = ExoplanetsSysSim.StellarTable.usable
if occursin(r".jld2$",filename)
try
data = load(filename)
df = data["stellar_catalog"]
#usable::Array{Int64,1} = data["stellar_catalog_usable"]
Core.typeassert(df,DataFrame)
StellarTable.set_star_table(df)
catch
error(string("# Failed to read stellar catalog >",filename,"< in jld2 format."))
end
else
try
df = CSV.read(filename)
catch
error(string("# Failed to read stellar catalog >",filename,"< in ascii format."))
end
has_mass = .! (ismissing.(df[:mass]) .| ismissing.(df[:mass_err1]) .| ismissing.(df[:mass_err2]))
has_radius = .! (ismissing.(df[:radius]) .| ismissing.(df[:radius_err1]) .| ismissing.(df[:radius_err2]))
has_dens = .! (ismissing.(df[:dens]) .| ismissing.(df[:dens_err1]) .| ismissing.(df[:dens_err2]))
has_cdpp = .! (ismissing.(df[:rrmscdpp01p5]) .| ismissing.(df[:rrmscdpp02p0]) .| ismissing.(df[:rrmscdpp02p5]) .| ismissing.(df[:rrmscdpp03p0]) .| ismissing.(df[:rrmscdpp03p5]) .| ismissing.(df[:rrmscdpp04p5]) .| ismissing.(df[:rrmscdpp05p0]) .| ismissing.(df[:rrmscdpp06p0]) .| ismissing.(df[:rrmscdpp07p5]) .| ismissing.(df[:rrmscdpp09p0]) .| ismissing.(df[:rrmscdpp10p5]) .| ismissing.(df[:rrmscdpp12p0]) .| ismissing.(df[:rrmscdpp12p5]) .| ismissing.(df[:rrmscdpp15p0]))
has_ld = .! (ismissing.(df[:limbdark_coeff1]) .| ismissing.(df[:limbdark_coeff2]) .| ismissing.(df[:limbdark_coeff3]) .| ismissing.(df[:limbdark_coeff4]))
has_rest = .! (ismissing.(df[:dataspan]) .| ismissing.(df[:dutycycle]))
in_Q1Q12 = []
obs_gt_5q = []
for x in df[:st_quarters]
subx = string(x)
num_q_obs = count(c -> c == '1', subx)
push!(obs_gt_5q, num_q_obs>5)
subx = ("0"^(17-length(subx)))*subx
indQ = something(findfirst(isequal('1'), subx), 0) # 0 if no quarter observed, matching the old search() behavior
if ((indQ < 1) | (indQ > 12)) || num_q_obs<=5
push!(in_Q1Q12, false)
else
push!(in_Q1Q12, true)
end
end
is_FGK = []
for x in 1:length(df[:teff])
if ((df[x,:teff] > 4000.0) & (df[x,:teff] < 7000.0) & (df[x,:logg] > 4.0))
push!(is_FGK, true)
else
push!(is_FGK, false)
end
end
is_usable = has_radius .& is_FGK .& has_mass .& has_rest .& has_dens .& has_cdpp .& obs_gt_5q .& has_ld
if contains(filename,"q1_q16_stellar.csv")
is_usable = is_usable .& in_Q1Q12
end
# See options at: http://exoplanetarchive.ipac.caltech.edu/docs/API_keplerstellar_columns.html
symbols_to_keep = [ :kepid, :mass, :mass_err1, :mass_err2, :radius, :radius_err1, :radius_err2, :dens, :dens_err1, :dens_err2, :rrmscdpp01p5, :rrmscdpp02p0, :rrmscdpp02p5, :rrmscdpp03p0, :rrmscdpp03p5, :rrmscdpp04p5, :rrmscdpp05p0, :rrmscdpp06p0, :rrmscdpp07p5, :rrmscdpp09p0, :rrmscdpp10p5, :rrmscdpp12p0, :rrmscdpp12p5, :rrmscdpp15p0, :cdppslplong, :cdppslpshrt, :dataspan, :dutycycle, :limbdark_coeff1, :limbdark_coeff2, :limbdark_coeff3, :limbdark_coeff4 ]
delete!(df, [~(x in symbols_to_keep) for x in names(df)]) # delete columns that we won't be using anyway
usable = findall(is_usable)
df = df[usable, symbols_to_keep]
tmp_df = DataFrame()
for col in names(df)
tmp_df[col] = collect(skipmissing(df[col]))
end
df = tmp_df
mast_df = CSV.read(convert(String,joinpath(abspath(joinpath(dirname(Base.find_package("ExoplanetsSysSim")),"..")), "data", "KeplerMAST_TargetProperties.csv")))
delete!(mast_df, [~(x in [:kepid, :contam]) for x in names(mast_df)])
df = join(df, mast_df, on=:kepid)
StellarTable.set_star_table(df)
end
println("# Removing stars observed <5 quarters.")
df[!,:wf_id] = map(x->ExoplanetsSysSim.WindowFunction.get_window_function_id(x,use_default_for_unknown=false),df[!,:kepid])
obs_5q = df[!,:wf_id].!=-1
df = df[obs_5q, names(df)]
StellarTable.set_star_table(df)
return df
end
setup_star_table_dr25(sim_param::SimParam; force_reread::Bool = false) = setup_dr25(sim_param, force_reread=force_reread)
setup_star_table_dr25(filename::String; force_reread::Bool = false) = setup_dr25(filename, force_reread=force_reread)
## summary_statistics
function calc_summary_stats_obs_binned_rates(cat_obs::KeplerObsCatalog, param::SimParam; trueobs_cat::Bool = false, obs_skyavg::Bool = false)
ssd = Dict{String,Any}()
cache = Dict{String,Any}()
if !trueobs_cat
ssd["num targets"] = get_int(param,"num_targets_sim_pass_one")
else
ssd["num targets"] = get_int(param,"num_kepler_targets")
end
max_tranets_in_sys = get_int(param,"max_tranets_in_sys") # Demo that simulation parameters can specify how to evalute models, too
@assert max_tranets_in_sys >= 1
idx_tranets = findall(x::KeplerTargetObs-> length(x.obs) > 0, cat_obs.target)::Array{Int64,1} # Find indices of systems with at least 1 tranet = potentially detectable transiting planet
# Count total number of tranets and compile indices for N-tranet systems
num_tranets = 0
idx_n_tranets = Vector{Int64}[ Int64[] for m = 1:max_tranets_in_sys]
for n in 1:max_tranets_in_sys-1
idx_n_tranets[n] = findall(x::KeplerTargetObs-> length(x.obs) == n, cat_obs.target[idx_tranets] )
num_tranets += n*length(idx_n_tranets[n])
end
idx_n_tranets[max_tranets_in_sys] = findall(x::KeplerTargetObs-> length(x.obs) >= max_tranets_in_sys, cat_obs.target[idx_tranets] )
num_tranets += max_tranets_in_sys*length(idx_n_tranets[max_tranets_in_sys]) # WARNING: this means we need to ignore planets w/ indices > max_tranets_in_sys
if ( length( findall(x::KeplerTargetObs-> length(x.obs) > max_tranets_in_sys, cat_obs.target[idx_tranets] ) ) > 0) # Make sure max_tranets_in_sys is at least big enough for observed systems
warn("Observational data has more transiting planets in one systems than max_tranets_in_sys allows.")
end
num_tranets = convert(Int64,num_tranets) # TODO OPT: Figure out why isn't this already an Int. I may be doing something that prevents some optimizations
num_sys_tranets = zeros(max_tranets_in_sys) # Since observed data, don't need to calculate probabilities.
for n in 1:max_tranets_in_sys # Make histogram of N-tranet systems
num_sys_tranets[n] = length(idx_n_tranets[n])
end
ssd["num_sys_tranets"] = num_sys_tranets
ssd["planets detected"] = num_tranets
period_list = zeros(num_tranets)
weight_list = zeros(num_tranets)
radius_list = zeros(num_tranets)
n = 1 # tranet id
if !trueobs_cat
for i in idx_tranets
ld = ExoplanetsSysSim.LimbDarkeningParam4thOrder(ExoplanetsSysSim.StellarTable.star_table(cat_obs.target[i].star.id,:limbdark_coeff1), ExoplanetsSysSim.StellarTable.star_table(cat_obs.target[i].star.id,:limbdark_coeff2), ExoplanetsSysSim.StellarTable.star_table(cat_obs.target[i].star.id,:limbdark_coeff3), ExoplanetsSysSim.StellarTable.star_table(cat_obs.target[i].star.id,:limbdark_coeff4) )
flux_ratio = (1.0+ExoplanetsSysSim.StellarTable.star_table(cat_obs.target[i].star.id, :contam))/1.0 # WARNING: Assumes flux = 1
#Rstar = trueobs_cat ? cat_obs.target[i].star.radius : ExoplanetsSysSim.StellarTable.star_table(cat_obs.target[i].star.id, :radius)
for j in 1:num_planets(cat_obs.target[i])
period_list[n] = cat_obs.target[i].obs[j].period
if obs_skyavg
weight_list[n] = min(ExoplanetsSysSim.prob_detect(cat_obs.target[i].prob_detect,j), 1.0) # CHECK WHAT THIS DOES
else
weight_list[n] = 1.0
end
radius_ratio = ExoplanetsSysSim.ratio_from_depth(cat_obs.target[i].obs[j].depth*flux_ratio, ld)
radius_list[n] = radius_ratio*ExoplanetsSysSim.StellarTable.star_table(cat_obs.target[i].star.id, :radius)
#radius_list[n] = sqrt(cat_obs.target[i].obs[j].depth)*cat_obs.target[i].star.radius
#radius_list[n] = sqrt(cat_obs.target[i].obs[j].depth)*Rstar
n = n+1
end
end
else
for i in idx_tranets
ld = ExoplanetsSysSim.LimbDarkeningParam4thOrder(ExoplanetsSysSim.StellarTable.star_table(cat_obs.target[i].star.id,:limbdark_coeff1), ExoplanetsSysSim.StellarTable.star_table(cat_obs.target[i].star.id,:limbdark_coeff2), ExoplanetsSysSim.StellarTable.star_table(cat_obs.target[i].star.id,:limbdark_coeff3), ExoplanetsSysSim.StellarTable.star_table(cat_obs.target[i].star.id,:limbdark_coeff4) )
flux_ratio = (1.0+ExoplanetsSysSim.StellarTable.star_table(cat_obs.target[i].star.id, :contam))/1.0 # WARNING: Assumes flux = 1
for j in 1:num_planets(cat_obs.target[i])
period_list[n] = cat_obs.target[i].obs[j].period
weight_list[n] = 1.0
radius_ratio = ExoplanetsSysSim.ratio_from_depth(cat_obs.target[i].obs[j].depth*flux_ratio, ld)
radius_list[n] = radius_ratio*cat_obs.target[i].star.radius
#radius_list[n] = sqrt(cat_obs.target[i].obs[j].depth)*cat_obs.target[i].star.radius
n = n+1
end
end
end
#ssd["period_list"] = period_list
ssd["weight_list"] = weight_list
#ssd["radius_list"] = radius_list
limitP::Array{Float64,1} = get_any(param, "p_lim_arr", Array{Float64,1})
limitRp::Array{Float64,1} = get_any(param, "r_lim_arr", Array{Float64,1})
np_bin = zeros((length(limitP)-1) * (length(limitRp)-1))
np_bin_idx = 1
bin_match_list = fill(fill(0,0),(length(limitP)-1)*(length(limitRp)-1))
for i in 1:(length(limitP)-1)
P_match = findall(x -> ((x > limitP[i]) && (x < limitP[i+1])), period_list)
for j in 1:(length(limitRp)-1)
R_match = findall(x -> ((x > limitRp[j]) && (x < limitRp[j+1])), radius_list)
bin_match = intersect(P_match, R_match)
bin_match_list[np_bin_idx] = bin_match
np_bin[np_bin_idx] = sum(weight_list[bin_match])
np_bin_idx += 1
end
end
cache["bin_match_list"] = bin_match_list
#ssd["planets detected"] = sum(np_bin)
ssd["planets table"] = np_bin
return CatalogSummaryStatistics(ssd, cache)
end
## abc_distance
function calc_distance_vector_binned(summary1::CatalogSummaryStatistics, summary2::CatalogSummaryStatistics, pass::Int64, sim_param::SimParam ; verbose::Bool = false)
p_dim = length(get_any(sim_param, "p_lim_arr", Array{Float64,1}))-1
r_dim = length(get_any(sim_param, "r_lim_arr", Array{Float64,1}))-1
#rate_tab::Array{Float64,2} = get_any(sim_param, "obs_par", Array{Float64,2})
d = Array{Float64}(undef,0)
if pass == 1
if verbose
println("# Summary 1, pass 1: ",summary1)
println("# Summary 2, pass 1: ",summary2)
end
d = zeros(3)
np1 = haskey(summary1.stat,"planets table") ? summary1.stat["planets table"] : summary1.stat["expected planets table"]
np2 = haskey(summary2.stat,"planets table") ? summary2.stat["planets table"] : summary2.stat["expected planets table"]
np_bin = zeros(length(np1))
num_detect_sim = zeros(length(np1))
### Bernoulli distance
bin_match_list = summary2.cache["bin_match_list"]
@assert length(bin_match_list) == length(np1)
np2 = zeros(Int64,length(np1))
###
for n in 1:length(np1)
#np_bin[n] = dist_L1_abs(np1[n]/summary1.stat["num targets"], np2[n]/summary2.stat["num targets"])
#np_bin[n] = dist_L2_abs(np1[n]/summary1.stat["num targets"], np2[n]/summary2.stat["num targets"])
#np_bin[n] = distance_poisson_draw(np2[n]/summary2.stat["num targets"]*summary1.stat["num targets"], convert(Int64, np1[n]))
np_bin[n], num_detect_sim[n] = distance_sum_of_bernoulli_draws(floor(Int64,np1[n]),summary1.stat["num targets"], summary2.stat["weight_list"], summary2.stat["num targets"], bin_match_list[n])
#println("True # [Bin ", n,"] = ",np1[n],", Expected # [Bin ", n,"] = ",np2[n])
end
#d[1] = maximum(np_bin)
#d[1] = sum(np_bin)
np1_ratio = np1 ./ summary1.stat["num targets"]
np2_ratio = num_detect_sim ./ summary1.stat["num targets"]
d[1] = distance_canberra(np1_ratio, np2_ratio)# + distance_cosine(np1_ratio, np2_ratio)
#println("Total rate: ", rate_tab[1,1], " / Distance (radii): ", d[1], " / Sim. cat. ratio = ", sum(num_detect_sim[1:r_dim])/summary2.stat["num_targets"], " / Obs. cat. ratio = ", sum(np1[1:r_dim])/summary1.stat["num targets"], " / Distance (total): ", dist_L2_abs(sum(num_detect_sim[1:r_dim])/summary2.stat["num targets"], sum(np1[1:r_dim])/summary1.stat["num targets"])*r_dim)
# for j in 1:p_dim
# d[1] += dist_L2_abs(sum(num_detect_sim[(j-1)*r_dim+1:(j-1)*r_dim+r_dim])/summary1.stat["num targets"], sum(np1[(j-1)*r_dim+1:(j-1)*r_dim+r_dim])/summary1.stat["num targets"])*r_dim
# end
else
println("# calc_distance_vector_demo doesn't know what to do for pass= ", pass)
end
return d
end
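# Reference sketch of the textbook Canberra distance used for d[1] above
# (canberra_sketch is illustrative; the package's own distance_canberra may
# differ in detail, e.g. in how all-zero bins are handled):
canberra_sketch(x, y) = sum(abs(xi - yi)/(abs(xi) + abs(yi)) for (xi, yi) in zip(x, y) if !(xi == 0 && yi == 0))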
#=
function calc_distance_vector_relative_binned(summary1::CatalogSummaryStatistics, summary2::CatalogSummaryStatistics, pass::Int64, sim_param::SimParam ; verbose::Bool = false)
p_dim = length(get_any(sim_param, "p_lim_arr", Array{Float64,1}))-1
r_dim = length(get_any(sim_param, "r_lim_arr", Array{Float64,1}))-1
#rate_tab::Array{Float64,2} = get_any(sim_param, "obs_par", Array{Float64,2})
d = Array{Float64}(0)
if pass == 1
if verbose
println("# Summary 1, pass 1: ",summary1)
println("# Summary 2, pass 1: ",summary2)
end
d = zeros(3)
np1 = haskey(summary1.stat,"planets table") ? summary1.stat["planets table"] : summary1.stat["expected planets table"]
np2 = haskey(summary2.stat,"planets table") ? summary2.stat["planets table"] : summary2.stat["expected planets table"]
np_bin = zeros(length(np1))
num_detect_sim = zeros(length(np1))
### Bernoulli distance
bin_match_list = summary2.cache["bin_match_list"]
@assert length(bin_match_list) == length(np1)
np2 = zeros(Int64,length(np1))
###
for n in 1:length(np1)
#np_bin[n] = dist_L1_abs(np1[n]/summary1.stat["num targets"], np2[n]/summary2.stat["num targets"])
#np_bin[n] = dist_L2_abs(np1[n]/summary1.stat["num targets"], np2[n]/summary2.stat["num targets"])
#np_bin[n] = distance_poisson_draw(np2[n]/summary2.stat["num targets"]*summary1.stat["num targets"], convert(Int64, np1[n]))
np_bin[n], num_detect_sim[n] = distance_sum_of_bernoulli_draws(floor(Int64,np1[n]),summary1.stat["num targets"], summary2.stat["weight_list"], summary2.stat["num targets"], bin_match_list[n])
#println("True # [Bin ", n,"] = ",np1[n],", Expected # [Bin ", n,"] = ",np2[n])
end
#d[1] = maximum(np_bin)
d[1] = 0.0
np1_col = 0
np2_col = 0
#println("Total rate: ", rate_tab[1,1], " / Distance (radii): ", d[1], " / Sim. cat. ratio = ", sum(num_detect_sim[1:r_dim])/summary2.stat["num_targets"], " / Obs. cat. ratio = ", sum(np1[1:r_dim])/summary1.stat["num targets"], " / Distance (total): ", dist_L2_abs(sum(num_detect_sim[1:r_dim])/summary2.stat["num targets"], sum(np1[1:r_dim])/summary1.stat["num targets"])*r_dim)
for j in 1:p_dim
d[1] += dist_L2_abs(sum(num_detect_sim[(j-1)*r_dim+1:(j-1)*r_dim+r_dim])/summary1.stat["num targets"], sum(np1[(j-1)*r_dim+1:(j-1)*r_dim+r_dim])/summary1.stat["num targets"])*r_dim
np1_col = sum(np1[(j-1)*r_dim+1:(j-1)*r_dim+r_dim])
np2_col = sum(num_detect_sim[(j-1)*r_dim+1:(j-1)*r_dim+r_dim])
for i in 1:r_dim
d[1] += dist_L2_abs(num_detect_sim[(j-1)*r_dim+i]/np2_col, np1[(j-1)*r_dim+i]/np1_col)
end
end
else
println("# calc_distance_vector_demo doesn't know what to do for pass= ", pass)
end
return d
end
=#
## eval_model
# function test_dr25binrates()
# global sim_param_closure = setup_sim_param_dr25binrates()
# cat_phys = generate_kepler_physical_catalog(sim_param_closure)
# cat_obs = observe_kepler_targets_single_obs(cat_phys,sim_param_closure)
# global summary_stat_ref_closure = calc_summary_stats_obs_demo(cat_obs,sim_param_closure)
# global cat_phys_try_closure = generate_christiansen_catalog(sim_param_closure)
# global cat_obs_try_closure = observe_kepler_targets_sky_avg(cat_phys_try_closure,sim_param_closure)
# global summary_stat_try_closure = calc_summary_stats_sim_pass_one_demo(cat_obs_try_closure,cat_phys_try_closure,sim_param_closure)
# summary_stat_try_closure = calc_summary_stats_sim_pass_two_demo(cat_obs_try_closure,cat_phys_try_closure,summary_stat_try_closure,sim_param_closure)
# param_guess = make_vector_of_sim_param(sim_param_closure)
# evaluate_model_scalar_ret( param_guess)
# end
## inverse_detection & simple bayesian
function inv_det(cat_obs::KeplerObsCatalog, param::SimParam)
num_targ = ExoplanetsSysSim.StellarTable.num_usable_in_star_table()
limitP::Array{Float64,1} = get_any(param, "p_lim_arr", Array{Float64,1})
limitRp::Array{Float64,1} = get_any(param, "r_lim_arr", Array{Float64,1})
println("------------------------------")
cnt_bin, np_bin = cnt_np_bin(cat_obs, param)
println("------------------------------")
println("Inverse Detection Rates:")
for i in 1:(length(limitP)-1)
for j in 1:(length(limitRp)-1)
rate_f = np_bin[(i-1)*(length(limitRp)-1) + j]/num_targ*100.
if cnt_bin[(i-1)*(length(limitRp)-1) + j] > 0.
println(rate_f,
" +/- ", rate_f/sqrt(cnt_bin[(i-1)*(length(limitRp)-1) + j]), " %")
else
println(rate_f,
" +/- N/A %")
end
end
end
println()
end
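# The estimator implemented above, in brief (inv_det_weight is an illustrative
# helper, not part of the original source): each detected planet contributes
# 1/(pgeo*pdet) intrinsic planets, where pgeo is the geometric transit
# probability and pdet the catalog-averaged detection probability, so
# f_bin = sum_k 1/(pgeo_k*pdet_k) / num_targ
# with fractional uncertainty 1/sqrt(n_detected) from Poisson counting.
inv_det_weight(pgeo::Real, pdet::Real) = 1.0/(pgeo*pdet) # e.g. pgeo=0.01, pdet=0.5 -> 200 intrinsic planets per detection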
function simp_bayes(cat_obs::KeplerObsCatalog, param::SimParam)
num_targ = ExoplanetsSysSim.StellarTable.num_usable_in_star_table()
limitP::Array{Float64,1} = get_any(param, "p_lim_arr", Array{Float64,1})
limitRp::Array{Float64,1} = get_any(param, "r_lim_arr", Array{Float64,1})
println("------------------------------")
cnt_bin, np_bin = cnt_np_bin(cat_obs, param)
println("------------------------------")
ess_bin = stellar_ess(param)
println("------------------------------")
println("Simple Bayesian Rates:")
for i in 1:(length(limitP)-1)
for j in 1:(length(limitRp)-1)
rate_f = (1.0+cnt_bin[(i-1)*(length(limitRp)-1) + j])/(1.0+ess_bin[(i-1)*(length(limitRp)-1) + j])*100.
up_quant = quantile(Gamma(1.0+cnt_bin[(i-1)*(length(limitRp)-1) + j], 1.0/(1.0+ess_bin[(i-1)*(length(limitRp)-1) + j])), 0.8413)*100.
low_quant = quantile(Gamma(1.0+cnt_bin[(i-1)*(length(limitRp)-1) + j], 1.0/(1.0+ess_bin[(i-1)*(length(limitRp)-1) + j])), 0.1587)*100.
println(rate_f,
" + ", up_quant - rate_f,
" - ", rate_f - low_quant, " %")
end
end
println()
end
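# Sketch of the posterior behind the numbers above (demo_simp_bayes_quantiles
# is illustrative, not in the original source): a Gamma(1,1) prior updated by
# n detections and stellar effective sample size ess yields a
# Gamma(1+n, 1/(1+ess)) posterior, whose mean (1+n)/(1+ess) is the rate
# printed above and whose 15.87%/84.13% quantiles give the error bars.
function demo_simp_bayes_quantiles(n::Real, ess::Real)
post = Gamma(1.0+n, 1.0/(1.0+ess))
(mean(post), quantile(post, 0.1587), quantile(post, 0.8413))
end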
function inv_det_simp_bayes(cat_obs::KeplerObsCatalog, param::SimParam)
num_targ = ExoplanetsSysSim.StellarTable.num_usable_in_star_table()
limitP::Array{Float64,1} = get_any(param, "p_lim_arr", Array{Float64,1})
limitRp::Array{Float64,1} = get_any(param, "r_lim_arr", Array{Float64,1})
println("------------------------------")
cnt_bin, np_bin = cnt_np_bin(cat_obs, param)
println("------------------------------")
ess_bin = stellar_ess(param)
println("------------------------------")
println("Inverse Detection Rates:")
for i in 1:(length(limitP)-1)
for j in 1:(length(limitRp)-1)
rate_f = np_bin[(i-1)*(length(limitRp)-1) + j]/num_targ*100.
if cnt_bin[(i-1)*(length(limitRp)-1) + j] > 0.
println(rate_f,
" +/- ", rate_f/sqrt(cnt_bin[(i-1)*(length(limitRp)-1) + j]), " %")
else
println(rate_f,
" +/- N/A %")
end
end
end
println()
println("Simple Bayesian Rates:")
for i in 1:(length(limitP)-1)
for j in 1:(length(limitRp)-1)
rate_f = (1.0+cnt_bin[(i-1)*(length(limitRp)-1) + j])/(1.0+ess_bin[(i-1)*(length(limitRp)-1) + j])*100.
up_quant = quantile(Gamma(1.0+cnt_bin[(i-1)*(length(limitRp)-1) + j], 1.0/(1.0+ess_bin[(i-1)*(length(limitRp)-1) + j])), 0.8413)*100.
low_quant = quantile(Gamma(1.0+cnt_bin[(i-1)*(length(limitRp)-1) + j], 1.0/(1.0+ess_bin[(i-1)*(length(limitRp)-1) + j])), 0.1587)*100.
println(rate_f,
" + ", up_quant - rate_f,
" - ", rate_f - low_quant, " %")
end
end
println()
end
## cnt_bin & np_bin (inverse detection & simple bayesian)
function cnt_np_bin(cat_obs::KeplerObsCatalog, param::SimParam, verbose::Bool = true)
num_targ = ExoplanetsSysSim.StellarTable.num_usable_in_star_table()
idx_tranets = findall(x::KeplerTargetObs-> length(x.obs) > 0, cat_obs.target)::Array{Int64,1}
limitP::Array{Float64,1} = get_any(param, "p_lim_arr", Array{Float64,1})
limitRp::Array{Float64,1} = get_any(param, "r_lim_arr", Array{Float64,1})
np_bin = zeros((length(limitP)-1) * (length(limitRp)-1))
cnt_bin = zeros((length(limitP)-1) * (length(limitRp)-1))
pl_idx = 1
println("Calculating completeness for each planet...")
for i in idx_tranets
for j in 1:num_planets(cat_obs.target[i])
pper = cat_obs.target[i].obs[j].period
prad = sqrt(cat_obs.target[i].obs[j].depth)*cat_obs.target[i].star.radius
pbin = findfirst(x -> ((pper > limitP[x]) && (pper < limitP[x+1])), collect(1:(length(limitP)-1)))
rbin = findfirst(x -> ((prad > limitRp[x]) && (prad < limitRp[x+1])), collect(1:(length(limitRp)-1)))
if (pbin !== nothing && rbin !== nothing) # findfirst returns nothing when the planet falls outside the grid
cnt_bin[(pbin-1)*(length(limitRp)-1) + rbin] += 1
pgeo = ExoplanetsSysSim.calc_transit_prob_single_planet_approx(pper, cat_obs.target[i].star.radius, cat_obs.target[i].star.mass)
pdet = 0.0
for star_id in 1:num_targ
ld = ExoplanetsSysSim.LimbDarkeningParam4thOrder(ExoplanetsSysSim.StellarTable.star_table(star_id,:limbdark_coeff1), ExoplanetsSysSim.StellarTable.star_table(star_id,:limbdark_coeff2), ExoplanetsSysSim.StellarTable.star_table(star_id,:limbdark_coeff3), ExoplanetsSysSim.StellarTable.star_table(star_id,:limbdark_coeff4) )
star = SingleStar(ExoplanetsSysSim.StellarTable.star_table(star_id,:radius),ExoplanetsSysSim.StellarTable.star_table(star_id,:mass),1.0, ld, star_id)
cdpp_arr = ExoplanetsSysSim.make_cdpp_array_empty(star_id)#(1.0e-6*sqrt(1.0 / 24.0 / ExoplanetsSysSim.LC_duration)) .* [ExoplanetsSysSim.StellarTable.star_table(star_id, :rrmscdpp01p5)*sqrt(1.5), ExoplanetsSysSim.StellarTable.star_table(star_id, :rrmscdpp02p0)*sqrt(2.), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp02p5)*sqrt(2.5), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp03p0)*sqrt(3.), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp03p5)*sqrt(3.5), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp04p5)*sqrt(4.5), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp05p0)*sqrt(5.), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp06p0)*sqrt(6.), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp07p5)*sqrt(7.5), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp09p0)*sqrt(9.), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp10p5)*sqrt(10.5), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp12p0)*sqrt(12.), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp12p5)*sqrt(12.5), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp15p0)*sqrt(15.)]
#cdpp = 1.0e-6 * ExoplanetsSysSim.StellarTable.star_table(star_id, :rrmscdpp04p5) * sqrt(4.5/24.0 / ExoplanetsSysSim.LC_duration )
contam = 0.0
data_span = ExoplanetsSysSim.StellarTable.star_table(star_id, :dataspan)
duty_cycle = ExoplanetsSysSim.StellarTable.star_table(star_id, :dutycycle)
pl_arr = Array{Planet}(undef,1)
orbit_arr = Array{Orbit}(undef,1)
incl = acos(Base.rand()*star.radius*ExoplanetsSysSim.rsol_in_au/ExoplanetsSysSim.semimajor_axis(pper, star.mass))
orbit_arr[1] = Orbit(pper, 0., incl, 0., 0., Base.rand()*2.0*pi)
pl_arr[1] = Planet(prad, 1.0e-6)
if ExoplanetsSysSim.StellarTable.star_table_has_key(:wf_id)
wf_id = ExoplanetsSysSim.StellarTable.star_table(star_id,:wf_id)
else
wf_id = ExoplanetsSysSim.WindowFunction.get_window_function_id(ExoplanetsSysSim.StellarTable.star_table(star_id,:kepid))
end
kep_targ = KeplerTarget([PlanetarySystem(star, pl_arr, orbit_arr)], repeat(cdpp_arr, outer=[1,1]),contam,data_span,duty_cycle,wf_id)
duration = ExoplanetsSysSim.calc_transit_duration(kep_targ,1,1)
if duration <= 0.
continue
end
ntr = ExoplanetsSysSim.calc_expected_num_transits(kep_targ, 1, 1, param)
depth = ExoplanetsSysSim.calc_transit_depth(kep_targ,1,1)
cdpp = ExoplanetsSysSim.interpolate_cdpp_to_duration_lookup_cdpp(kep_targ, duration)
#cdpp = ExoplanetsSysSim.interpolate_cdpp_to_duration(kep_targ, duration)
snr = ExoplanetsSysSim.calc_snr_if_transit_cdpp(kep_targ, depth, duration, cdpp, param, num_transit=ntr)
pdet += ExoplanetsSysSim.calc_prob_detect_if_transit(kep_targ, snr, pper, duration, param, num_transit=ntr)
end
np_bin[(pbin-1)*(length(limitRp)-1) + rbin] += 1.0/pgeo/(pdet/num_targ)
if verbose
println("Planet ",pl_idx," => Bin ", (pbin-1)*(length(limitRp)-1) + rbin, ", C = ", 1.0/pgeo/(pdet/num_targ))
end
pl_idx += 1
end
end
end
return cnt_bin, np_bin
end
## stellar catalog ess (simple bayesian)
function stellar_ess(param::SimParam, verbose::Bool = true)
num_realiz = 100
num_targ = ExoplanetsSysSim.StellarTable.num_usable_in_star_table()
limitP::Array{Float64,1} = get_any(param, "p_lim_arr", Array{Float64,1})
limitRp::Array{Float64,1} = get_any(param, "r_lim_arr", Array{Float64,1})
ess_bin = zeros((length(limitP)-1) * (length(limitRp)-1))
println(string("Stellar ESS calculation beginning..."))
for star_id in 1:num_targ
ld = ExoplanetsSysSim.LimbDarkeningParam4thOrder(ExoplanetsSysSim.StellarTable.star_table(star_id,:limbdark_coeff1), ExoplanetsSysSim.StellarTable.star_table(star_id,:limbdark_coeff2), ExoplanetsSysSim.StellarTable.star_table(star_id,:limbdark_coeff3), ExoplanetsSysSim.StellarTable.star_table(star_id,:limbdark_coeff4) )
star = SingleStar(ExoplanetsSysSim.StellarTable.star_table(star_id,:radius),ExoplanetsSysSim.StellarTable.star_table(star_id,:mass),1.0, ld, star_id)
cdpp_arr = ExoplanetsSysSim.make_cdpp_array_empty(star_id)#(1.0e-6*sqrt(1.0/24.0/ExoplanetsSysSim.LC_duration)) .* [ExoplanetsSysSim.StellarTable.star_table(star_id, :rrmscdpp01p5)*sqrt(1.5), ExoplanetsSysSim.StellarTable.star_table(star_id, :rrmscdpp02p0)*sqrt(2.), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp02p5)*sqrt(2.5), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp03p0)*sqrt(3.), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp03p5)*sqrt(3.5), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp04p5)*sqrt(4.5), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp05p0)*sqrt(5.), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp06p0)*sqrt(6.), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp07p5)*sqrt(7.5), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp09p0)*sqrt(9.), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp10p5)*sqrt(10.5), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp12p0)*sqrt(12.), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp12p5)*sqrt(12.5), ExoplanetsSysSim.StellarTable.star_table(star_id,:rrmscdpp15p0)*sqrt(15.)]
contam = 0.0
data_span = ExoplanetsSysSim.StellarTable.star_table(star_id, :dataspan)
duty_cycle = ExoplanetsSysSim.StellarTable.star_table(star_id, :dutycycle)
if ExoplanetsSysSim.StellarTable.star_table_has_key(:wf_id)
wf_id = ExoplanetsSysSim.StellarTable.star_table(star_id,:wf_id)
else
wf_id = ExoplanetsSysSim.WindowFunction.get_window_function_id(ExoplanetsSysSim.StellarTable.star_table(star_id,:kepid))
end
for i_idx in 1:(length(limitP)-1)
for j_idx in 1:(length(limitRp)-1)
temp_bin = 0.0
for n_test in 1:num_realiz
pper = exp(Base.rand()*(log(limitP[i_idx+1])-log(limitP[i_idx]))+log(limitP[i_idx]))
prad = exp(Base.rand()*(log(limitRp[j_idx+1])-log(limitRp[j_idx]))+log(limitRp[j_idx]))
pgeo = ExoplanetsSysSim.calc_transit_prob_single_planet_approx(pper, star.radius, star.mass)
pdet = 0.0
pl_arr = Array{Planet}(undef,1)
orbit_arr = Array{Orbit}(undef,1)
incl = acos(Base.rand()*star.radius*ExoplanetsSysSim.rsol_in_au/ExoplanetsSysSim.semimajor_axis(pper, star.mass))
orbit_arr[1] = Orbit(pper, 0., incl, 0., 0., Base.rand()*2.0*pi)
pl_arr[1] = Planet(prad, 1.0e-6)
kep_targ = KeplerTarget([PlanetarySystem(star, pl_arr, orbit_arr)], repeat(cdpp_arr, outer=[1,1]),contam,data_span,duty_cycle,wf_id)
duration = ExoplanetsSysSim.calc_transit_duration(kep_targ,1,1)
if duration <= 0.
continue
end
ntr = ExoplanetsSysSim.calc_expected_num_transits(kep_targ, 1, 1, param)
depth = ExoplanetsSysSim.calc_transit_depth(kep_targ,1,1)
# Apply correction to snr if grazing transit
size_ratio = kep_targ.sys[1].planet[1].radius/kep_targ.sys[1].star.radius
b = ExoplanetsSysSim.calc_impact_parameter(kep_targ.sys[1],1)
snr_correction = ExoplanetsSysSim.calc_depth_correction_for_grazing_transit(b,size_ratio)
depth *= snr_correction
#cdpp = ExoplanetsSysSim.interpolate_cdpp_to_duration(kep_targ, duration)
cdpp = ExoplanetsSysSim.interpolate_cdpp_to_duration_lookup_cdpp(kep_targ, duration)
snr = ExoplanetsSysSim.calc_snr_if_transit_cdpp(kep_targ, depth, duration, cdpp, param, num_transit=ntr)
#kepid = ExoplanetsSysSim.StellarTable.star_table(kep_targ.sys[1].star.id, :kepid)
#osd_duration = ExoplanetsSysSim.get_legal_durations(pper,duration) #tests if durations are included in Kepler's observations for a certain planet period. If not, returns nearest possible duration
#osd = ExoplanetsSysSim.WindowFunction.interp_OSD_from_table(kepid, pper, osd_duration)
#if osd_duration > duration #use a correcting factor if this duration is lower than the minimum searched for this period.
# osd = osd*osd_duration/duration
#end
#snr = ExoplanetsSysSim.calc_snr_if_transit(kep_targ, depth, duration, osd, sim_param, num_transit=ntr)
pdet = ExoplanetsSysSim.calc_prob_detect_if_transit(kep_targ, snr, pper, duration, param, num_transit=ntr)
temp_bin += (pgeo*pdet)
end
ess_bin[(i_idx-1)*(length(limitRp)-1) + j_idx] += temp_bin/num_realiz
end
end
if verbose && rem(star_id, 10^convert(Int,floor(log10(num_targ)))) == 0.
println(string("Star #", star_id, " finished"))
end
end
if verbose
println("")
for i in 1:(length(limitP)-1)
for j in 1:(length(limitRp)-1)
println("Period limits: ", limitP[i:i+1], " / Radius limits: ", limitRp[j:j+1]/ExoplanetsSysSim.earth_radius, " / Stellar ESS = ", ess_bin[(i-1)*(length(limitRp)-1) + j])
end
end
end
return ess_bin
end
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 4204 | ## ExoplanetsSysSim/src/ExoplanetsSysSim.jl
## (c) 2015 Eric B. Ford
using DataFrames # needed outside module, so JLD/JLD2 load has the type
module ExoplanetsSysSim
# Packages to be used # Note: Tried to keep to a minimum in core package to help with maintainability
import Compat: @compat #, readstring, is_windows
if VERSION < v"0.7"
import Compat: UTF8String, ASCIIString
end
if VERSION >= v"0.5-"
using Combinatorics
end
using Distributions
using PDMats
# using StatsFuns
using DataFrames
if VERSION < VersionNumber(0,3,20)
using Docile
end
using CORBITS
# Includes & associated exports for types, then generic functions, then demo functions
include("constants.jl")
#require(joinpath(Pkg.dir("ExoplanetsSysSim"), "src", "constants.jl"))
export SimParam
export SimulationParameters
export add_param_fixed, add_param_active, set_active, set_inactive, is_active, update_param, get, get_real, get_int, get_bool, get_function, get_any, haskey, make_vector_of_sim_param, update_sim_param_from_vector!, make_vector_of_active_param_keys, get_range_for_sim_param
export setup_sim_param_demo
include("simulation_parameters.jl")
using ExoplanetsSysSim.SimulationParameters
export Orbit
include("orbit.jl")
export Planet
include("planet.jl")
export LimbDarkeningParamAbstract, LimbDarkeningParamLinear, LimbDarkeningParamQuadratic, LimbDarkeningParam4thOrder
include("limb_darkening.jl")
export depth_at_midpoint, ratio_from_depth
export StarAbstract, Star, SingleStar, BinaryStar
include("star.jl")
export flux, mass
export generate_stars
export PlanetarySystemAbstract, PlanetarySystemSingleStar, PlanetarySystem, SystemPlane
include("planetary_system.jl")
export test_stability, is_period_ratio_near_resonance, calc_if_near_resonance
export draw_truncated_poisson, draw_power_law, map_square_to_triangle
export cdf_lognormal, invert_cdf_lognormal, cdf_power_law, invert_cdf_power_law, draw_segmented_uniform
export compute_unstable_regions_periods_given_planets, compute_unstable_mutualHill_regions_periodscales_given_clusters, compute_regions_non_overlapping, compute_allowed_regions_cdf_lognormal, compute_allowed_regions_cdf_power_law
export draw_lognormal_allowed_regions, draw_power_law_allowed_regions, draw_period_lognormal_allowed_regions, draw_period_lognormal_allowed_regions_Hill, draw_period_lognormal_allowed_regions_mutualHill, draw_periodscale_power_law_allowed_regions_mutualHill
#include("corbits.jl")
#export prob_of_transits_approx
include("window_function.jl")
export WindowFunction
export setup_window_function, get_window_function_data, get_window_function_id, eval_window_function, setup_OSD_interp#, cdpp_vs_osd
include("stellar_table.jl")
export StellarTable
export setup_star_table, star_table, num_usable_in_star_table, set_star_table, star_table_has_key
export KeplerTarget
export num_planets, generate_kepler_target_from_table, generate_kepler_target_simple
include("target.jl")
export KeplerTargetObs
include("transit_observations.jl")
export semimajor_axis, period_given_semimajor_axis
#export setup_koi_table, koi_table, num_koi
export KeplerPhysicalCatalog, KeplerObsCatalog
include("kepler_catalog.jl")
export generate_kepler_physical_catalog, observe_kepler_targets_single_obs, observe_kepler_targets_sky_avg, simulated_read_kepler_observations, setup_actual_planet_candidate_catalog, read_koi_catalog
export CatalogSummaryStatistics
include("summary_statistics.jl")
export calc_summary_stats_obs_demo, calc_summary_stats_sim_pass_one_demo, calc_summary_stats_sim_pass_two_demo
include("abc_distance.jl")
export dist_L1_fractional, dist_L1_abs, dist_L2_fractional, dist_L2_abs, calc_scalar_distance, combine_scalar_distances, distance_poisson_likelihood, distance_poisson_draw, distance_sum_of_bernoulli_draws, distance_canberra, distance_cosine
export calc_distance_vector_demo
export TestEvalModel
include("eval_model.jl") # Also includes macros to help write eval model using different variables for closures
export test_eval_model
using ExoplanetsSysSim.TestEvalModel
export SysSimIO
include("io.jl")
using ExoplanetsSysSim.SysSimIO
export save_sim_param, save_sim_results, load_sim_param, load_distances, load_summary_stats
end # module
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 14597 | ## ExoplanetsSysSims/src/abc_distance.jl
## (c) 2015 Eric B. Ford
dist_L1_fractional(x::Real, y::Real) = (x==y==0.0) ? 0.0 : abs( (x-y)*2/(x+y) )
dist_L1_abs(x::Real, y::Real) = abs(x-y)
dist_L2_fractional(x::Real, y::Real) = (x==y==0.0) ? 0.0 : abs( (x-y)^2*2/(x^2+y^2) )
dist_L2_abs(x::Real, y::Real) = (x-y)^2
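# Added note (illustration): e.g. dist_L1_fractional(1.0, 2.0) == 2/3, the
# absolute difference normalized by the mean of the two values; both fractional
# distances return 0 when x == y == 0 to avoid dividing by zero.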
# Library of functions to convert distance vector into a scalar distance
calc_scalar_distance_sum(d::Vector{Float64}) = sum(d)
calc_scalar_distance_rms(d::Vector{Float64}) = sqrt(sum(abs2,d))  # sum of squares via Base's abs2 (sumsq is not defined in Base)
calc_scalar_distance_max(d::Vector{Float64}) = maximum(d)
# How to combine two distances based on subsets of the full distance vector (should always be greater than either for two pass algorithm to work)
combine_scalar_distances_sum(d1::Float64, d2::Float64) = d1+d2
combine_scalar_distances_rms(d1::Float64, d2::Float64) = sqrt(d1*d1+d2*d2)
combine_scalar_distances_max(d1::Float64, d2::Float64) = max(d1,d2)
# Pick which one to be used
calc_scalar_distance(d::Vector{Float64}) = calc_scalar_distance_sum(d)
combine_scalar_distances(d1::Float64, d2::Float64) = combine_scalar_distances_sum(d1,d2)
# compute supremum of differences between empirical cdfs.
# Borrowed from JuliaStats/HypothesisTests.jl
function ksstats(x::AbstractVector{T}, y::AbstractVector{S}) where {T<:Real, S<:Real}
n_x, n_y = length(x), length(y)
sort_idx = sortperm([x; y], alg=MergeSort)
pdf_diffs = [ones(n_x)/n_x; -ones(n_y)/n_y][sort_idx]
cdf_diffs = cumsum(pdf_diffs)
deltap = maximum(cdf_diffs)
deltan = -minimum(cdf_diffs)
delta = max(deltap,deltan)
(n_x, n_y, deltap, deltan, delta)
end
# weighted version # WARNING: Function is untested
function ksstats(x::AbstractVector{T}, y::AbstractVector{S}, wx::AbstractVector{T}, wy::AbstractVector{T}) where {T<:Real, S<:Real}
n_x, n_y = length(x), length(y)
wx .*= 1.0/sum(wx)
wy .*= 1.0/sum(wy)
sort_idx = sortperm([x; y], alg=MergeSort)
pdf_diffs = [wx; -wy][sort_idx]
cdf_diffs = cumsum(pdf_diffs)
deltap = maximum(cdf_diffs)
deltan = -minimum(cdf_diffs)
delta = max(deltap,deltan)
(n_x, n_y, deltap, deltan, delta) # should the first two values returned here be sum(wx) and sum(wy) before normalizing? For now, we ignore all but the final delta anyway.
end
dist_KS(x::AbstractVector{T}, y::AbstractVector{S}) where {T<:Real, S<:Real} = ksstats(x,y)[5]
dist_KS(x::AbstractVector{T}, y::AbstractVector{S}, wx::AbstractVector{T}, wy::AbstractVector{T}) where {T<:Real, S<:Real} = ksstats(x,y,wx,wy)[5]
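# Illustrative usage (added sketch, not in the original source): dist_KS is the
# supremum difference between the two empirical CDFs, so it lies in [0,1] and
# grows as the two samples become more discrepant.
function example_dist_KS()
  x = randn(1000)
  y = randn(1000) .+ 0.5   # shifted sample
  dist_KS(x, y)            # larger shifts push this toward 1
end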
##### Function to compute the KS distance for discrete arrays (i.e. of the planet multiplicities in the systems):
function ksstats_ints(x::AbstractVector{T}, y::AbstractVector{S}) where {T <: Int, S <: Int}
n_x, n_y = length(x), length(y)
xy_max = max(maximum(x), maximum(y)) #maximum of both x and y
x_counts = [sum(x .== n) for n in 1:xy_max]
y_counts = [sum(y .== n) for n in 1:xy_max]
pdf_diffs = x_counts./n_x .- y_counts./n_y
cdf_diffs = cumsum(pdf_diffs)
deltap = maximum(cdf_diffs)
deltan = -minimum(cdf_diffs)
delta = max(deltap,deltan)
(n_x, n_y, deltap, deltan, delta)
end
##### Function to compute the 2-sample Anderson-Darling (AD) distance between two empirical CDFs (continuous distributions):
function ADstats(x::AbstractVector{T}, y::AbstractVector{S}) where {T <: Real, S <: Real}
#This function computes the AD distance according to A. N. Pettitt (1976) Eq. (1.2)
n, m = length(x), length(y)
N = n + m
sort_idx = sortperm([x; y], alg=MergeSort) #array of indices that would sort the combined array
M_i_diffs = [ones(n); zeros(m)][sort_idx]
M_i_array = cumsum(M_i_diffs)[1:end-1] #array of M_i except for last element, i.e. from i=1 to i=N-1
i_array = 1:(N-1) #array of i from i=1 to i=N-1
AD_dist = (1/(n*m))*sum(((M_i_array*N .- n*i_array).^2)./(i_array.*(N .- i_array))) #AD distance
return AD_dist
end
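# Illustrative comparison (added sketch, not in the original source): the AD
# distance is small for samples drawn from the same distribution and grows with
# the discrepancy between them.
function example_ADstats()
  x = randn(500)
  d_same  = ADstats(x, randn(500))         # similar distributions -> small
  d_shift = ADstats(x, randn(500) .+ 1.0)  # shifted distribution -> larger
  (d_same, d_shift)
end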
function ADstats_mod(x::AbstractVector{T}, y::AbstractVector{S}) where {T <: Real, S <: Real}
#This function is the same as 'ADstats' except without the factor of 'nm/N' before the integral
n, m = length(x), length(y)
N = n + m
sort_idx = sortperm([x; y], alg=MergeSort) #array of indices that would sort the combined array
M_i_diffs = [ones(n); zeros(m)][sort_idx]
M_i_array = cumsum(M_i_diffs)[1:end-1] #array of M_i except for last element, i.e. from i=1 to i=N-1
i_array = 1:(N-1) #array of i from i=1 to i=N-1
AD_dist = (N/((n*m)^2))*sum(((M_i_array*N .- n*i_array) .^2) ./(i_array .*(N .- i_array))) #AD distance
return AD_dist
end
##### Function to compute the Cressie-Read Power Divergence (CRPD) statistic for the distributions of observed planet multiplicities:
function CRPDstats(En::AbstractVector{T}, On::AbstractVector{S}) where {T <: Int, S <: Int}
#En and On must be arrays of the total numbers of systems with 1,2,3,... observed planets, in the simulated (i.e. expected) and the actual (i.e. observed Kepler) data, respectively
n_max = max(length(En), length(On))
En = append!(En, zeros(n_max - length(En)))
On = append!(On, zeros(n_max - length(On)))
E_array = En./sum(En) #normalized numbers (fractions) of simulated systems with 1,2,3,... observed planets
O_array = On./sum(On) #normalized numbers (fractions) of actual Kepler systems with 1,2,3,... observed planets
rho = 0.
for (i,E_i) in enumerate(E_array)
if En[i] != 0
rho += O_array[i]*((O_array[i]/E_array[i])^(2/3) - 1)
end
end
rho = (9/5)*rho
return rho
end
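# Worked example (added sketch; the counts below are hypothetical): En and On
# are the numbers of systems with exactly 1,2,3,... observed planets. The
# statistic is zero only when the normalized multiplicity fractions agree.
function example_CRPDstats()
  En = [100, 30, 10, 3]   # hypothetical simulated counts
  On = [110, 25, 12]      # hypothetical observed counts (padded internally)
  CRPDstats(En, On)
end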
# lambda: rate for Poisson process, i.e., expected value for number of events
# k: number of events observed
function calc_num_events_to_maximize_poisson_pdf(lambda::Real)
kstar = floor(Int64,lambda)
if lambda > kstar+1
kstar +=1
end
return kstar
end
function log_prob_poisson_num_events_given_rate(lambda::Real,k::Real)
  # log of the Poisson PMF; log(k!) is computed as a sum over logs so we avoid
  # depending on SpecialFunctions for lgamma (k is integer-valued in practice)
  k*log(lambda)-lambda-sum(log.(1:round(Int,k)))
end
function delta_log_prob_poisson_num_events_given_rate(lambda::Real, k::Integer, kstar::Integer = calc_num_events_to_maximize_poisson_pdf(lambda) )
delta = (k-kstar)*log(lambda)
if kstar>=k+1
delta += sum(log.((k+1):kstar))
elseif k>=kstar+1
delta -= sum(log.((kstar+1):k))
end
return delta
end
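# Sanity check (added sketch, not in the original source): kstar maximizes the
# Poisson PMF, so the delta log-probability should never be positive.
function example_poisson_delta_logprob(lambda::Real=5.3)
  kstar = calc_num_events_to_maximize_poisson_pdf(lambda)
  all(delta_log_prob_poisson_num_events_given_rate(lambda,k,kstar) <= 0.0 for k in 0:20)
end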
function distance_poisson_likelihood(lambda::Real, k::Integer)
kstar = calc_num_events_to_maximize_poisson_pdf(lambda)
khi = round(Int64,lambda+sqrt(lambda))
delta_logprob_one_sigma_hi = delta_log_prob_poisson_num_events_given_rate(lambda,khi,kstar)
delta_logp_k = delta_log_prob_poisson_num_events_given_rate(lambda,k,kstar)
delta_logp_k/delta_logprob_one_sigma_hi
end
function distance_poisson_draw(lambda::Real, k::Integer)
d = Distributions.Poisson(lambda)
simulated_number_of_detections = rand(d)
abs( simulated_number_of_detections -k)
end
"""
distance_sum_of_bernoulli_draws(num_pl_obs, num_targets_obs, prob_detect_list, num_targets_sim, bin_match_list)
Calculate L2 distance and the number of simulated planets detected for a binned occurrence rate where each planet's detection is drawn from a Bernoulli distribution using the planet's detection probability.
# Arguments:
- `num_pl_obs::Integer`: Number of planets in (true) observed catalog
- `num_targets_obs::Integer`: Number of target stars in (true) observed catalog
- `prob_detect_list::Vector{Float}`: Detection probabilities for planets in simulated catalog
- `num_targets_sim::Integer`: Number of target stars in simulated catalog
- `bin_match_list::Vector{Integer}`: List of simulated catalog planet indices associated with current occurrence rate bin
# Returns:
- L2 distance between provided (true) observed catalog and simulated catalog summary statistics
- Number of detected simulated planets
"""
function distance_sum_of_bernoulli_draws(num_pl_obs::Integer, num_targets_obs::Integer, prob_detect_list::Vector{TReal}, num_targets_sim::Integer, bin_match_list::Vector{TInt}) where {TReal<:Real, TInt<:Integer}
@assert(0<=num_pl_obs<=num_targets_obs)
num_pl_match = length(bin_match_list)
@assert(0<=length(bin_match_list))
num_detect_sim = 0
if num_pl_match >= 1
num_draws_all = min(max(1,floor(Int64, num_targets_obs/num_targets_sim)),1000)
@assert(1<=num_draws_all<=1000)
for i in 1:num_pl_match
pl_id = bin_match_list[i]
@assert(1<=pl_id<=length(prob_detect_list))
prob_detect = min(prob_detect_list[pl_id],1.0)
num_detect_sim += sum(rand(Bernoulli(prob_detect),num_draws_all))
end
# If number of targets observed is not a multiple of number of targets simulated, then pick a random set to make total number of draws equal (as long as there are some planets to choose from)
for i in (num_pl_match*num_draws_all+1):(floor(Int64,num_pl_match*num_targets_obs/num_targets_sim))
pl_id = bin_match_list[rand(1:num_pl_match)]
prob_detect = min(prob_detect_list[pl_id],1.0)
num_detect_sim += rand(Bernoulli(prob_detect))
end
end
distance = dist_L2_abs(num_pl_obs/num_targets_obs, num_detect_sim/num_targets_obs)
return distance, num_detect_sim
end
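# Illustrative call (added sketch; all values are hypothetical): 3 planets
# detected among 1000 observed targets, compared against a simulated catalog of
# 500 targets in which 5 simulated planets fall in this occurrence-rate bin.
function example_distance_sum_of_bernoulli_draws()
  prob_detect_list = [0.9, 0.8, 0.5, 0.3, 0.1]  # hypothetical detection probabilities
  bin_match_list = collect(1:5)                 # indices of the planets in the bin
  distance_sum_of_bernoulli_draws(3, 1000, prob_detect_list, 500, bin_match_list)
end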
# compute modified Canberra distance (denominator uses sqrt(x+y) rather than |x|+|y|).
function distance_canberra_modified(x::AbstractVector{T}, y::AbstractVector{S}) where {T<:Real, S<:Real}
@assert length(x) == length(y)
dist_sum = 0.0
for i in 1:length(x)
numer = abs(x[i]-y[i])
denom = sqrt(x[i] + y[i])
if denom == 0.0
continue
else
dist_sum += numer/denom
end
end
return dist_sum
end
# compute original Canberra distance.
function distance_canberra_orig(x::AbstractVector{T}, y::AbstractVector{S}) where {T<:Real, S<:Real}
@assert length(x) == length(y)
dist_sum = 0.0
for i in 1:length(x)
numer = abs(x[i]-y[i])
denom = abs(x[i]) + abs(y[i])
if denom == 0.0
continue
else
dist_sum += numer/denom
end
end
return dist_sum
end
distance_canberra(x::AbstractVector{T}, y::AbstractVector{S}) where {T<:Real, S<:Real} = distance_canberra_modified(x,y) # WARNING: Defaults to modified Canberra distance. At some point, update Danley's code to call distance_canberra_modified, so can remove this
# compute Cosine "distance" (note: this returns the cosine similarity, so identical vectors give 1).
function distance_cosine(x::AbstractVector{T}, y::AbstractVector{S}) where {T<:Real, S<:Real}
@assert length(x) == length(y)
numer = 0.0
denom_1 = 0.0
denom_2 = 0.0
for i in 1:length(x)
numer += x[i]*y[i]
denom_1 += x[i]^2
denom_2 += y[i]^2
end
return numer/(sqrt(denom_1)*sqrt(denom_2))
end
function wasserstein_distance(u_values::AbstractVector{T1}, v_values::AbstractVector{T2}, p::Integer) where { T1<:Real, T2<:Real}
# code adapted from SciPy.stats._cdf_distance
u_sorter = issorted(u_values) ? (1:length(u_values)) : sortperm(u_values)
v_sorter = issorted(v_values) ? (1:length(v_values)) : sortperm(v_values)
all_values = sort(vcat(u_values,v_values),alg=MergeSort)
deltas = all_values[2:end] .- all_values[1:end-1]
u_cdf = map(x->searchsortedlast(u_values[u_sorter],x)/ length(u_values),all_values[1:end-1])
v_cdf = map(x->searchsortedlast(v_values[v_sorter],x)/ length(v_values),all_values[1:end-1])
if p==1
return sum(abs.(u_cdf.-v_cdf).*deltas)
elseif p==2
return sqrt(sum((u_cdf.-v_cdf).^2 .* deltas))
else
     return (sum(abs.(u_cdf.-v_cdf).^p .* deltas))^(1/p)  # Julia uses ^, not pow
end
end
earth_mover_distance(u_values::AbstractVector{T1}, v_values::AbstractVector{T2}) where { T1<:Real, T2<:Real} = wasserstein_distance(u_values,v_values,1)
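# Illustrative check (added sketch, not in the original source): for two point
# masses at 0 and at 1, the earth mover (p=1 Wasserstein) distance is exactly 1.
function example_earth_mover_distance()
  earth_mover_distance([0.0, 0.0], [1.0, 1.0])   # == 1.0
end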
# TODO USER SCI: IMPORTANT: Replace the distance function with something well thought out for your particular scientific application. See examples
function calc_distance_vector_demo(summary1::CatalogSummaryStatistics, summary2::CatalogSummaryStatistics, pass::Int64, sim_param::SimParam ; verbose::Bool = false)
d = Array{Float64}(undef,0)
if pass == 1
if verbose
println("# Summary 1, pass 1: ",summary1)
println("# Summary 2, pass 1: ",summary2)
end
d = zeros(3)
    # Since observed and simulated catalogs can have different summary statistics for the number of planets, prefer detections if available (e.g., after pass 2), otherwise use expected (e.g., from pass 1)
np1 = haskey(summary1.stat,"planets detected") ? summary1.stat["planets detected"] : summary1.stat["expected planets detected"]
np2 = haskey(summary2.stat,"planets detected") ? summary2.stat["planets detected"] : summary2.stat["expected planets detected"]
d[1] = dist_L1_abs(np1/summary1.stat["num targets"],np2/summary2.stat["num targets"]) # Normalize so different statistics weighted appropriately and not dominated by this one
#println("np1 = ",np1,", np2 = ",np2)
#println("np1 (normalized) = ",np1/summary1.stat["num targets"],", np2 (normalized) = ",np2/summary2.stat["num targets"],", d[1] = ",d[1])
#d[2] = dist_L1_abs(summary1.stat["mean log10 P"],summary2.stat["mean log10 P"])
#d[3] = dist_L1_abs(summary1.stat["mean log10 depth"],summary2.stat["mean log10 depth"])
#d[4] = dist_L1_abs(summary1.stat["std log10 P"],summary2.stat["std log10 P"])
#d[5] = dist_L1_abs(summary1.stat["std log10 depth"],summary2.stat["std log10 depth"])
#d[2] = dist_KS(summary1.stat["P list"], summary2.stat["P list"])
#d[3] = dist_KS(summary1.stat["depth list"], summary2.stat["depth list"])
# EDITS for christiansen-single-bin
#d[2] = dist_KS(summary1.stat["P list"], summary2.stat["P list"],summary1.stat["weight list"],summary2.stat["weight list"])
#d[3] = dist_KS(summary1.stat["depth list"], summary2.stat["depth list"],summary1.stat["weight list"],summary2.stat["weight list"])
# END EDITS
elseif pass == 2
max_tranets_in_sys = get_int(sim_param,"max_tranets_in_sys")
d = zeros(max_tranets_in_sys)
for n in 1:max_tranets_in_sys
d[n] = n*dist_L1_abs(summary1.stat["num_sys_tranets"][n]/summary1.stat["num targets"],summary2.stat["num_sys_tranets"][n]/summary2.stat["num targets"])
end
else
println("# calc_distance_vector_demo doesn't know what to do for pass= ", pass)
end
return d
end
function test_abc_distance(cat_obs::KeplerObsCatalog, cat_phys::KeplerPhysicalCatalog, sim_param::SimParam)
ss_pass1 = calc_summary_stats_sim_pass_one_demo(cat_obs,cat_phys,sim_param)
ss_pass2 = calc_summary_stats_sim_pass_two_demo(cat_obs,cat_phys,ss_pass1,sim_param)
d1 = calc_distance_vector_demo(ss_pass1,ss_pass1, 1, sim_param)
d2 = calc_distance_vector_demo(ss_pass2,ss_pass2, 2, sim_param)
end
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 2973 | ## ExoplanetsSysSim/src/mission_constants.jl
## (c) 2015 Eric B. Ford
#module KeplerConstants
const num_channels = 84
const num_modules = 42
const num_quarters = 17 # QUERY: I'm favoring leaving out quarter 0, since that was engineering data. Agree?
const cdpp_durations = [1.5,2.,2.5,3.,3.5,4.5,5.,6.,7.5,9.,10.5,12.,12.5,15.]
const duration_symbols = [:rrmscdpp01p5, :rrmscdpp02p0,:rrmscdpp02p5,:rrmscdpp03p0,:rrmscdpp03p5,:rrmscdpp04p5,:rrmscdpp05p0,:rrmscdpp06p0,:rrmscdpp07p5,:rrmscdpp09p0,:rrmscdpp10p5,:rrmscdpp12p0,:rrmscdpp12p5,:rrmscdpp15p0 ]
const num_cdpp_timescales = 14
@assert num_cdpp_timescales == length(cdpp_durations) == length(duration_symbols)
const mission_data_span = 1459.789 # maximum(ExoplanetsSysSim.StellarTable.df[:dataspan])
const mission_duty_cycle = 0.8751 # median(ExoplanetsSysSim.StellarTable.df[:dutycycle])
const kepler_exp_time_internal = 6.019802903/(24*60*60) # https://archive.stsci.edu/kepler/manuals/archive_manual.pdf
const kepler_read_time_internal = 0.5189485261/(24*60*60) # https://archive.stsci.edu/kepler/manuals/archive_manual.pdf
const num_exposures_per_LC = 270
const num_exposures_per_SC = 9
const LC_integration_time = kepler_exp_time_internal*num_exposures_per_LC
const SC_integration_time = kepler_exp_time_internal*num_exposures_per_SC
const LC_read_time = kepler_read_time_internal*num_exposures_per_LC
const SC_read_time = kepler_read_time_internal*num_exposures_per_SC
const LC_duration = LC_integration_time + LC_read_time
const SC_duration = SC_integration_time + SC_read_time
const LC_rate = 1.0/LC_duration
const SC_rate = 1.0/SC_duration
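# Worked example (added for illustration): one long-cadence sample integrates
# 270 exposures of ~6.02 s each plus 270 readouts of ~0.52 s each, i.e.
#   LC_duration = 270*(6.019802903+0.5189485261) s ≈ 1765.5 s ≈ 29.42 min,
# so LC_rate ≈ 48.9 samples per day.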
# export num_channels,num_modules,num_quarters,num_cdpp_timescales,mission_data_span,mission_duty_cycle
# export kepler_exp_time_internal,kepler_read_time_internal,num_exposures_per_LC,num_exposures_per_SC
# export LC_integration_time,SC_integration_time,LC_read_time,SC_read_time,LC_duration,SC_duration,LC_rate,SC_rate
#end
#using KeplerConstants
# Standard conversion factors on which unit system is based
const global AU_in_m_IAU2012 = 149597870700.0
const global G_in_mks_IAU2015 = 6.67384e-11
const global G_mass_sun_in_mks = 1.3271244e20
const global G_mass_earth_in_mks = 3.986004e14
const global sun_radius_in_m_IAU2015 = 6.9566e8
const global earth_radius_eq_in_m_IAU2015 = 6.3781e6
const global sun_mass_in_kg_IAU2010 = 1.988547e30
# Constants used by this code
const global sun_mass = 1.0
const global earth_mass = G_mass_earth_in_mks/G_mass_sun_in_mks # about 3.0024584e-6
const global earth_radius = earth_radius_eq_in_m_IAU2015 / sun_radius_in_m_IAU2015 # about 0.0091705248
const global rsol_in_au = sun_radius_in_m_IAU2015 / AU_in_m_IAU2012 # about 0.00464913034
const global sec_in_day = 24*60*60
const global grav_const = G_in_mks_IAU2015 * sec_in_day^2 * sun_mass_in_kg_IAU2010 / AU_in_m_IAU2012^3 # about 2.9591220363e-4 in AU^3/(day^2 Msol)
const global day_in_year = 365.25
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 2649 | ## ExoplanetsSysSim/src/eval_model.jl
## (c) 2015 Eric B. Ford
# This merely demonstrates how to setup your own function to evaluate a simulated model against an observed catalog
# This code need not be used for any actual calculations
module TestEvalModel
export test_eval_model, evaluate_model_scalar_ret
using ExoplanetsSysSim
sim_param_closure = SimParam()
cat_phys_try_closure = KeplerPhysicalCatalog([])
cat_obs_try_closure = KeplerObsCatalog([])
summary_stat_try_closure = CatalogSummaryStatistics()
summary_stat_ref_closure = CatalogSummaryStatistics()
include("eval_model_macro.jl")
@make_evaluate_model(sim_param_closure,cat_phys_try_closure,cat_obs_try_closure,summary_stat_try_closure,summary_stat_ref_closure)
function test_eval_model()
global sim_param_closure = setup_sim_param_demo()
cat_phys = generate_kepler_physical_catalog(sim_param_closure)
cat_obs = observe_kepler_targets_single_obs(cat_phys,sim_param_closure)
global summary_stat_ref_closure = calc_summary_stats_obs_demo(cat_obs,sim_param_closure)
global cat_phys_try_closure = generate_kepler_physical_catalog(sim_param_closure)
global cat_obs_try_closure = observe_kepler_targets_single_obs(cat_phys_try_closure,sim_param_closure)
global summary_stat_try_closure = calc_summary_stats_sim_pass_one_demo(cat_obs_try_closure,cat_phys_try_closure,sim_param_closure)
summary_stat_try_closure = calc_summary_stats_sim_pass_two_demo(cat_obs_try_closure,cat_phys_try_closure,summary_stat_try_closure,sim_param_closure)
param_guess = make_vector_of_sim_param(sim_param_closure)
evaluate_model_scalar_ret( param_guess)
end
function test_eval_model_vs_sim_data()
global sim_param_closure = setup_sim_param_demo()
cat_phys = generate_kepler_physical_catalog(sim_param_closure)
# cat_obs = observe_kepler_targets_single_obs(cat_phys,sim_param_closure)
cat_obs = simulated_read_kepler_observations(sim_param_closure)
global summary_stat_ref_closure = calc_summary_stats_obs_demo(cat_obs,sim_param_closure)
global cat_phys_try_closure = generate_kepler_physical_catalog(sim_param_closure)
global cat_obs_try_closure = observe_kepler_targets_single_obs(cat_phys_try_closure,sim_param_closure)
global summary_stat_try_closure = calc_summary_stats_sim_pass_one_demo(cat_obs_try_closure,cat_phys_try_closure,sim_param_closure)
summary_stat_try_closure = calc_summary_stats_sim_pass_two_demo(cat_obs_try_closure,cat_phys_try_closure,summary_stat_try_closure,sim_param_closure)
param_guess = make_vector_of_sim_param(sim_param_closure)
evaluate_model_scalar_ret( param_guess)
end
end
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 2833 | ## ExoplanetsSysSim/src/eval_model_macro.jl
## (c) 2015 Eric B. Ford
# Macro to create functions that access variables with global scope (i.e., within the current module), so they can be used inside the evaluate model functions
macro make_evaluate_model(param,cat_phys,cat_obs,sum_stat,sum_stat_ref)
@eval begin
function evaluate_model(param_vector::Vector{Float64})
global $param
update_sim_param_from_vector!(param_vector,$param)
global $cat_phys = generate_kepler_physical_catalog($param)
#global $cat_obs = observe_kepler_targets_sky_avg($cat_phys,$param)
global $cat_obs = observe_kepler_targets_single_obs($cat_phys,$param)
global $sum_stat = calc_summary_stats_sim_pass_one_demo($cat_obs,$cat_phys,$param)
dist1 = calc_distance_vector_demo($sum_stat_ref,$sum_stat, 1, $param)
#if haskey($param,"minimum ABC dist skip pass 2")
# if calc_scalar_distance(dist1) > get($param,"minimum ABC dist skip pass 2")
# return dist1
# end
#end
$sum_stat = calc_summary_stats_sim_pass_two_demo($cat_obs,$cat_phys,$sum_stat,$param)
dist2 = calc_distance_vector_demo($sum_stat_ref,$sum_stat, 2, $param)
return [dist1; dist2]
end
function evaluate_model_pass_one(param_vector::Vector{Float64})
global $param
update_sim_param_from_vector!(param_vector,$param)
global $cat_phys = generate_kepler_physical_catalog($param)
#global $cat_obs = observe_kepler_targets_sky_avg($cat_phys,$param)
global $cat_obs = observe_kepler_targets_single_obs($cat_phys,$param)
global $sum_stat = calc_summary_stats_sim_pass_one_demo($cat_obs,$cat_phys,$param)
dist1 = calc_distance_vector_demo($sum_stat_ref,$sum_stat, 1, $param)
end
function evaluate_model_pass_two(param_vector::Vector{Float64})
global $param
global $cat_phys
global $cat_obs
global $sum_stat = calc_summary_stats_sim_pass_two_demo($cat_obs,$cat_phys,$sum_stat,$param)
dist2 = calc_distance_vector_demo($sum_stat_ref,$sum_stat, 2, $param)
end
function evaluate_model_scalar_ret(param::Vector{Float64})
calc_scalar_distance(evaluate_model(param))
end
function evaluate_model_pass_one_scalar_ret(param::Vector{Float64})
calc_scalar_distance(evaluate_model_pass_one(param))
end
function evaluate_model_pass_two_scalar_ret(param::Vector{Float64})
calc_scalar_distance(evaluate_model_pass_two(param))
end
#if module_name(current_module()) != :Main
  if !isequal(@__MODULE__, Main)   # note: Module(:Main) would construct a new module rather than refer to Main
export evaluate_model, evaluate_model_pass_one, evaluate_model_pass_two, evaluate_model_scalar_ret, evaluate_model_pass_one_scalar_ret, evaluate_model_pass_two_scalar_ret
end
end
end
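# Example invocation (added sketch; mirrors the usage in TestEvalModel): after
# defining module-level variables sim_param_closure, cat_phys_try_closure,
# cat_obs_try_closure, summary_stat_try_closure and summary_stat_ref_closure,
#   @make_evaluate_model(sim_param_closure, cat_phys_try_closure, cat_obs_try_closure, summary_stat_try_closure, summary_stat_ref_closure)
# defines evaluate_model(param_vector), evaluate_model_pass_one/_two, and the
# *_scalar_ret variants, all bound to those module-level variables.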
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 3944 | ## ExoplanetsSysSim/src/io.jl
## (c) 2015 Eric B. Ford
module SysSimIO
using ExoplanetsSysSim
#using HDF5, JLD
using FileIO, JLD2
#if VERSION >= v"0.5-"
# import Compat: UTF8String, ASCIIString
#end
export save_sim_param, save_sim_results, load_sim_param, load_distances, load_summary_stats
# Save SimParam p to JLD2 file
function save_sim_param(filename::String, p::SimParam)
local file
try
file = JLD2.jldopen(filename,"w")
write_sim_param(file,p)
catch
println("# Problem writing parameters to jld2 file: ", filename)
return false
  finally
    if @isdefined(file)   # only close the file if jldopen itself succeeded
      close(file)
    end
  end
return true
end
# Save SimParam p, list of distances, and summary statistics to JLD2 file
function save_sim_results(filename::String, p::SimParam; distances::Vector{Float64}=Array{Float64}(0), summary_stats::CatalogSummaryStatistics = CatalogSummaryStatistics() )
local file
try
file = JLD2.jldopen(filename,"w")
write_sim_param(file,p)
write_sim_summary_stats(file,summary_stats)
write_sim_distances(file,distances)
catch
println("# Problem writing data to jld2 file: ", filename)
  finally
    if @isdefined(file)   # only close the file if jldopen itself succeeded
      close(file)
    end
  end
end
function write_sim_distances(file::JLD2.JLDFile, d::Vector{Float64} )
JLD2.write(file,"distances",d)
end
function write_sim_summary_stats(file::JLD2.JLDFile, ss::CatalogSummaryStatistics )
JLD2.write(file,"summary_stats",ss.stat)
end
function write_sim_param(file::JLD2.JLDFile, p::SimParam)
sim_param_bool = Dict{String,Bool}()
sim_param_int = Dict{String,Integer}()
sim_param_real = Dict{String,Real}()
sim_param_function = Dict{String,String}()
sim_param_string = Dict{String,String}()
for k in keys(p.param)
#println("# k=",k,". v=",p.param[k])
if typeof(p.param[k]) <: Bool
sim_param_bool[k] = p.param[k]
elseif typeof(p.param[k]) <: Integer
sim_param_int[k] = p.param[k]
elseif typeof(p.param[k]) <: Real
sim_param_real[k] = p.param[k]
elseif typeof(p.param[k]) <: Function
sim_param_function[k] = string(p.param[k])
elseif typeof(p.param[k]) <: AbstractString
sim_param_string[k] = convert(String,p.param[k])
else
      @warn string("Can't store value of >",k,"< due to type ", typeof(p.param[k]))
end
end
JLD2.write(file,"sim_param_int",sim_param_int)
JLD2.write(file,"sim_param_real",sim_param_real)
JLD2.write(file,"sim_param_function",sim_param_function)
JLD2.write(file,"sim_param_string",sim_param_string)
JLD2.write(file,"sim_param_bool",sim_param_bool)
JLD2.write(file,"sim_param_active",p.active)
end
# Load SimParam from JLD2 file
function load_sim_param(filename::String)
local jld_data
try
jld_data = load(filename)
catch
    error(string("# Problem reading parameters from jld2 file: ", filename))  # fail loudly; jld_data would otherwise be undefined below
end
p = SimParam()
merge!(p.active, jld_data["sim_param_active"])
merge!(p.param, jld_data["sim_param_int"])
merge!(p.param, jld_data["sim_param_string"])
merge!(p.param, jld_data["sim_param_real"])
#=
merge!(p.param, jld_data["sim_param_function"])
df = jld_data["sim_param_function"]
for k in keys(df)
p.param[k]::Function = symbol(df[k])
end
=#
merge!(p.param, jld_data["sim_param_bool"])
return p
end
# Load list of distances from JLD2 file
function load_distances(filename::String)
local jld_data
try
jld_data = load(filename)
catch
    error(string("# Problem reading distances from jld2 file: ", filename))  # fail loudly; jld_data would otherwise be undefined below
end
d::Array{Float64,1} = jld_data["distances"]
return d
end
# Load summary statistics from JLD2 file
function load_summary_stats(filename::String)
local jld_data
try
jld_data = load(filename)
catch
    error(string("# Problem reading summary statistics from jld2 file: ", filename))  # fail loudly; jld_data would otherwise be undefined below
end
s = CatalogSummaryStatistics()
merge!(s.stat, jld_data["summary_stats"])
return s
end
function test_io()
sim_param = setup_sim_param_demo()
save_sim_param("test.jld2",sim_param)
spd = load_sim_param("test.jld2")
rm("test.jld2")
end
end # module SysSimIO
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 18163 | ## ExoplanetsSysSim/src/kepler_catalog.jl
## (c) 2015 Eric B. Ford
#using ExoplanetsSysSim
using DataFrames
#using DataArrays
using CSV
#using JLD
#using JLD2
using FileIO
#if VERSION >= v"0.5-"
# import Compat: UTF8String, ASCIIString
#end
mutable struct KeplerPhysicalCatalog
target::Array{KeplerTarget,1}
end
#KeplerPhysicalCatalog() = KeplerPhysicalCatalog([])
mutable struct KeplerObsCatalog
target::Array{KeplerTargetObs,1}
end
KeplerObsCatalog() = KeplerObsCatalog(KeplerTargetObs[])
"""
generate_kepler_physical_catalog(sim_param)
Wrapper function to create catalog of simulated Kepler targets.
# Arguments:
- `sim_param::SimParam`: Simulation parameter object; must have the following parameters set:
- num_targets_sim_pass_one = Number of Kepler targets in simulated catalog
- generate_kepler_target = Function which generates Kepler targets
- (Optional) stellar_catalog = Stellar catalog filename
- (Optional) star_table_setup = Function that loads stellar catalog into DataFrame
# Returns:
Kepler physical catalog object containing all simulated Kepler targets and associated planetary systems.
"""
function generate_kepler_physical_catalog(sim_param::SimParam)
if haskey(sim_param,"stellar_catalog")
star_tab_func = get_function(sim_param, "star_table_setup")
star_tab_func(sim_param)
end
num_sys = get_int(sim_param,"num_targets_sim_pass_one")
generate_kepler_target = get_function(sim_param,"generate_kepler_target")
target_list = Array{KeplerTarget}(undef,num_sys)
map!(x->generate_kepler_target(sim_param), target_list, 1:num_sys )
return KeplerPhysicalCatalog(target_list)
end
"""
observe_kepler_targets_sky_avg(input, sim_param)
Wrapper function to create catalog of simulated observations of Kepler targets using the sky averaging observation scheme (i.e. each planet's detection probability is the average detection probability over all view-angles).
# Arguments:
- `input::KeplerPhysicalCatalog`: Catalog object of simulated Kepler targets and associated planetary systems to be observed
- `sim_param::SimParam`: Simulation parameter object; requires the following simulation parameters to be set:
- calc_target_obs_sky_ave: Function name for sky averaging simulated observations
# Returns:
Kepler observations catalog object containing all properties observed from the Kepler targets and associated planetary systems that were detected during the simulation.
"""
function observe_kepler_targets_sky_avg(input::KeplerPhysicalCatalog, sim_param::SimParam )
calc_target_obs = get_function(sim_param,"calc_target_obs_sky_ave")
return observe_kepler_targets(calc_target_obs, input, sim_param)
end
"""
observe_kepler_targets_single_obs(input, sim_param)
Wrapper function to create catalog of simulated observations of Kepler targets using the single observer observation scheme (i.e. each planet's detection probability is the detection probability from the Earth).
# Arguments:
- `input::KeplerPhysicalCatalog`: Catalog object of simulated Kepler targets and associated planetary systems to be observed
- `sim_param::SimParam`: Simulation parameter object; requires the following simulation parameters to be set:
- calc_target_obs_single_obs: Function name for single observer simulated observations
# Returns:
Kepler observations catalog object containing all properties observed from the Kepler targets and associated planetary systems that were detected during the simulation.
"""
function observe_kepler_targets_single_obs(input::KeplerPhysicalCatalog, sim_param::SimParam )
calc_target_obs = get_function(sim_param,"calc_target_obs_single_obs")
return observe_kepler_targets(calc_target_obs, input, sim_param)
end
"""
observe_kepler_targets(calc_target_obs, input, sim_param)
Wrapper function to create catalog of simulated observations of Kepler targets.
# Arguments:
- `calc_target_obs::Function`: Function to use in simulating observations of Kepler targets (sky averaging vs. single observer schemes)
- `input::KeplerPhysicalCatalog`: Catalog object of simulated Kepler targets and associated planetary systems to be observed
- `sim_param::SimParam`: Simulation parameter object
# Returns:
Kepler observations catalog object containing all properties observed from the Kepler targets and associated planetary systems that were detected during the simulation.
"""
function observe_kepler_targets(calc_target_obs::Function, input::KeplerPhysicalCatalog, sim_param::SimParam )
#calc_target_obs = get_function(sim_param,"calc_target_obs_sky_ave")
#calc_target_obs = get_function(sim_param,"calc_target_obs_single_obs")
output = KeplerObsCatalog()
if haskey(sim_param,"mem_kepler_target_obs")
     output.target = get(sim_param,"mem_kepler_target_obs",Array{KeplerTargetObs}(undef,0) )
end
num_targets_sim_pass_one = get_int(sim_param,"num_targets_sim_pass_one")
if length(output.target) < num_targets_sim_pass_one
output.target = Array{KeplerTargetObs}(undef,num_targets_sim_pass_one)
end
#output.target = Array{KeplerTargetObs}(undef,length(input.target) ) # Replaced to reduce memory allocation
map!(x::KeplerTarget->calc_target_obs(x,sim_param)::KeplerTargetObs, output.target, input.target)
resize!(output.target,length(input.target))
return output
end
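# Typical pipeline (added sketch; assumes sim_param has already been configured,
# e.g. via setup_sim_param_demo()):
#   cat_phys = generate_kepler_physical_catalog(sim_param)
#   cat_obs  = observe_kepler_targets_single_obs(cat_phys, sim_param)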
# Test if this planetary system has at least one planet that transits (assuming a single observer)
function select_targets_one_obs(ps::PlanetarySystemAbstract)
for pl in 1:length(ps.orbit)
ecc::Float64 = ps.orbit[pl].ecc
incl::Float64 = ps.orbit[pl].incl
a::Float64 = semimajor_axis(ps,pl)
Rstar::Float64 = rsol_in_au*ps.star.radius
if (Rstar > (a*(1-ecc)*(1+ecc))/(1+ecc*sin(ps.orbit[pl].omega))*cos(incl))
return true
end
end
return false
end
#=
function select_targets_one_obs(ps::PlanetarySystemAbstract)
for pl in 1:length(ps.orbit)
if does_planet_transit(ps,pl)
return true
end
end
return false
end
=#
# Remove undetected planets from physical catalog
# TODO: OPT: Maybe create array of bools for which planets to keep, rather than splicing out non-detections?
function generate_obs_targets(cat_phys::KeplerPhysicalCatalog, sim_param::SimParam )
for t in 1:length(cat_phys.target)
kep_targ = cat_phys.target[t]
for ps in 1:length(cat_phys.target[t].sys)
sys = kep_targ.sys[ps]
for pl in length(sys.orbit):-1:1 # Going in reverse since removing planets from end of list first is cheaper than starting at beginning
ecc::Float64 = sys.orbit[pl].ecc
incl::Float64 = sys.orbit[pl].incl
a::Float64 = semimajor_axis(sys,pl)
Rstar::Float64 = rsol_in_au*sys.star.radius
does_it_transit = does_planet_transit(sys, pl)
pdet_if_tr = does_it_transit ? calc_prob_detect_if_transit_with_actual_b(kep_targ, ps, pl, sim_param) : 0.
if !does_it_transit || (rand()>pdet_if_tr)
splice!(cat_phys.target[t].sys[ps].orbit, pl)
splice!(cat_phys.target[t].sys[ps].planet, pl)
end
end
end
end
return cat_phys
end
# The following function is primarily left for debugging.
# Create a catalog of observations of simulated Kepler targets.
function simulated_read_kepler_observations(sim_param::SimParam )
println("# WARNING: Using simulated_read_kepler_observations.")
# if haskey(sim_param,"stellar_catalog")
# star_tab_func = get_function(sim_param, "star_table_setup")
# star_tab_func(sim_param)
# end
num_sys = get_int(sim_param,"num_kepler_targets")
generate_kepler_target = get_function(sim_param,"generate_kepler_target")
target_list = Array{KeplerTarget}(undef,num_sys)
map!(x->generate_kepler_target(sim_param), target_list, 1:num_sys )
cat_phys_cut = generate_obs_targets(KeplerPhysicalCatalog(target_list), sim_param)
calc_target_obs = get_function(sim_param,"calc_target_obs_single_obs")
output = KeplerObsCatalog()
output.target = map(x::KeplerTarget->calc_target_obs(x,sim_param)::KeplerTargetObs, cat_phys_cut.target)
return output
end
"""
read_koi_catalog(sim_param, force_reread=false)
Wrapper function to read Kepler Object of Interest (KOI) catalog given SimParam
# Arguments:
- `sim_param::SimParam`: Simulation parameter object; this function uses the following parameters from the SimParam object:
 - koi_catalog: String filename of Kepler Object of Interest catalog (if not provided, defaults to "q1_q17_dr25_koi.csv")
- `force_reread::Bool`: Should the file be read in even if a DataFrame of the KOIs already exists in workspace?
# Returns:
- Dataframe of KOI objects (and their respective properties).
- Vector of booleans indicating which KOIs were designated as planet candidates by the Kepler pipeline and have a valid observed radius ratio and period (necessary for detection probability calculation).
"""
function read_koi_catalog(sim_param::SimParam, force_reread::Bool = false)
filename = convert(String,joinpath(dirname(pathof(ExoplanetsSysSim)),"..", "data", convert(String,get(sim_param,"koi_catalog","q1_q17_dr25_koi.csv")) ) )
return read_koi_catalog(filename, force_reread)
end
"""
read_koi_catalog(filename, force_reread=false)
Function to read Kepler Object of Interest (KOI) catalog given filename string.
# Arguments:
- `filename::String`: String filename of Kepler Object of Interest catalog
- `force_reread::Bool`: Should the file be read in even if a DataFrame of the KOIs already exists in workspace?
# Returns:
- Dataframe of KOI objects (and their respective properties).
- Vector of booleans indicating which KOIs were designated as planet candidates by the Kepler pipeline and have a valid observed radius ratio and period (necessary for detection probability calculation).
"""
function read_koi_catalog(filename::String, force_reread::Bool = false)
local df, usable
if occursin(r".jld2$",filename) && !force_reread
try
data = load(filename)
df = data["koi_catalog"]
usable = data["koi_catalog_usable"]
Core.typeassert(df,DataFrame)
Core.typeassert(usable,Array{Int64,1})
catch
error(string("# Failed to read koi catalog >",filename,"< in jld2 format."))
end
else
try
df = CSV.read(filename,comment="#")
# Choose which KOIs to keep
#is_cand = (csv_data[!,:,koi_disposition_idx] .== "CONFIRMED") | (csv_data[!,:,koi_disposition_idx] .== "CANDIDATE")
is_cand = df[!,:koi_pdisposition] .== "CANDIDATE"
has_radius = .!ismissing.(df[!,:koi_ror])
has_period = .!(ismissing.(df[!,:koi_period]) .| ismissing.(df[!,:koi_period_err1]) .| ismissing.(df[!,:koi_period_err2]))
is_usable = .&(is_cand, has_radius, has_period)
usable = findall(is_usable)
# symbols_to_keep = [:kepid, :kepoi_name, :koi_pdisposition, :koi_score, :koi_ror, :koi_period, :koi_period_err1, :koi_period_err2, :koi_time0bk, :koi_time0bk_err1, :koi_time0bk_err2, :koi_depth, :koi_depth_err1, :koi_depth_err2, :koi_duration, :koi_duration_err1, :koi_duration_err2]
# df = df[usable, symbols_to_keep]
# tmp_df = DataFrame()
# for col in names(df)
# tmp_df[col] = collect(skipmissing(df[col]))
# end
# df = tmp_df
# usable = collect(1:length(df[!,:kepid]))
catch
error(string("# Failed to read koi catalog >",filename,"< in ascii format."))
end
end
return df, usable
end
"""
setup_actual_planet_candidate_catalog(df_star, df_koi, usable_koi, sim_param)
Create (true) catalog of Kepler observations of Kepler targets
# Arguments:
- `df_star::DataFrame`: DataFrame containing all Kepler target stars in catalog
 NOTE: df_star is assumed to have fields kepid, mass, and radius for all targets in the survey.
- `df_koi::DataFrame`: DataFrame containing all Kepler Object of Interests (KOIs)
- `usable_koi::Array{Integer}`: Array of KOI dataframe row indices corresponding to KOIs to use
- `sim_param::SimParam`: Simulation parameter object
# Returns:
- Kepler observations catalog containing Kepler targets and associated KOIs (to be used as true catalog in comparison with simulated observations)
"""
function setup_actual_planet_candidate_catalog(df_star::DataFrame, df_koi::DataFrame, usable_koi::Array{Int64}, sim_param::SimParam)
local target_obs, num_pl
df_koi = df_koi[usable_koi,:]
# Deprecated code to take a list of KepIDs and KOI names to pre-select a subset of KOIs
# if haskey(sim_param, "koi_subset_csv")
# koi_subset = fill(false, length(df_koi[!,:kepid]))
# subset_df = readtable(convert(String,get(sim_param,"koi_subset_csv", "christiansen_kov.csv")), header=true, separator=' ')
# for n in 1:length(subset_df[!,1])
# subset_colnum = 1
# subset_entry = findall(x->x==subset_df[n,1], df_koi[names(subset_df)[1]])
# # println("Initial cut: ", subset_entry)
# while (length(subset_entry) > 1) & (subset_colnum < length(names(subset_df)))
# subset_colnum += 1
# subsubset = findall(x->round(x*10.)==round(subset_df[n,subset_colnum]*10.), df_koi[subset_entry,names(subset_df)[subset_colnum]])
# # println("Extra cut: ", subset_df[n,subset_colnum], " / ", df_koi[subset_entry,col_idx], " = ", subsubset)
# subset_entry = subset_entry[subsubset]
# end
# if length(subset_entry) > 1
# cand_sub = findall(x->x == "CANDIDATE",df_koi[subset_entry,:koi_pdisposition])
# subset_entry = subset_entry[cand_sub]
# if length(subset_entry) > 1
# println("# Multiple planets found in final cut: ", subset_df[n,1])
# end
# end
# if length(subset_entry) < 1
# println("# No planets found in final cut: ", subset_df[n,:])
# end
# koi_subset[subset_entry] = true
# end
# df_koi = df_koi[findall(koi_subset),:]
# tot_plan = count(x->x, koi_subset)
# end
output = KeplerObsCatalog()
sort!(df_star, (:kepid))
df_obs = join(df_star, df_koi, on = :kepid)
#df_obs = sort!(df_obs, cols=(:kepid))
df_obs = sort!(df_obs, (:kepid))
# if haskey(sim_param, "koi_subset_csv")
# tot_plan -= length(df_obs[!,:kepoi_name])
# println("# Number of planet candidates in subset file with no matching star in table: ", tot_plan)
# end
# Add each KOI planet candidate to Kepler target object
plid = 0
for i in 1:length(df_obs[!,:kepoi_name])
if plid == 0
plid = 1
      while i+plid <= length(df_obs[!,:kepoi_name]) && df_obs[i+plid,:kepid] == df_obs[i,:kepid] # <= so the final row is also checked
plid += 1
end
num_pl = plid
target_obs = KeplerTargetObs(num_pl)
#target_obs.star = ExoplanetsSysSim.StarObs(df_obs[i,:radius],df_obs[i,:mass],findfirst(df_star[!,:kepid], df_obs[i,:kepid]))
star_idx = searchsortedfirst(df_star[!,:kepid],df_obs[i,:kepid])
if star_idx > length(df_star[!,:kepid])
        @warn string("# Couldn't find kepid ", df_obs[i,:kepid], " in df_star.")
star_idx = rand(1:length(df_star[!,:kepid]))
end
target_obs.star = ExoplanetsSysSim.StarObs(df_obs[i,:radius],df_obs[i,:mass],star_idx)
end
target_obs.obs[plid] = ExoplanetsSysSim.TransitPlanetObs(df_obs[i,:koi_period],df_obs[i,:koi_time0bk],df_obs[i,:koi_depth]/1.0e6,df_obs[i,:koi_duration])
target_obs.sigma[plid] = ExoplanetsSysSim.TransitPlanetObs((abs(df_obs[i,:koi_period_err1])+abs(df_obs[i,:koi_period_err2]))/2,(abs(df_obs[i,:koi_time0bk_err1])+abs(df_obs[i,:koi_time0bk_err2]))/2,(abs(df_obs[i,:koi_depth_err1]/1.0e6)+abs(df_obs[i,:koi_depth_err2]/1.0e6))/2,(abs(df_obs[i,:koi_duration_err1])+abs(df_obs[i,:koi_duration_err2]))/2)
#target_obs.prob_detect = ExoplanetsSysSim.SimulatedSystemDetectionProbs{OneObserver}( ones(num_pl), ones(num_pl,num_pl), ones(num_pl), fill(Array{Int64}(undef,0), 1) ) # Made line below to simplify calling
target_obs.prob_detect = ExoplanetsSysSim.OneObserverSystemDetectionProbs(num_pl)
plid -= 1
if plid == 0
push!(output.target,target_obs)
end
end
return output
end
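# Illustrative usage (added sketch; assumes the stellar table has already been
# loaded into df_star and that the default KOI catalog ships with the package):
#   df_koi, usable_koi = read_koi_catalog(sim_param)
#   cat_obs_true = setup_actual_planet_candidate_catalog(df_star, df_koi, usable_koi, sim_param)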
# Two functions below were just for debugging purposes
# Calculate SNR of every planet in simulated catalog
function calc_snr_list(cat::KeplerPhysicalCatalog, sim_param::SimParam)
snrlist = Array{Float64}(undef,0)
for t in 1:length(cat.target)
for p in 1:length(cat.target[t].sys[1].planet)
snr = calc_snr_if_transit(cat.target[t],1,p,sim_param)
if snr>0.0
push!(snrlist,snr)
end
end
end
snrlist[findall(x->x>7.1,snrlist)]
end
# Calculate detection probability (assuming planet transits) for every planet in simulated catalog
function calc_prob_detect_list(cat::KeplerPhysicalCatalog, sim_param::SimParam)
pdetectlist = Array{Float64}(undef,0)
for t in 1:length(cat.target)
for p in 1:length(cat.target[t].sys[1].planet)
#pdet = calc_prob_detect_if_transit(cat.target[t],1,p,sim_param)
pdet = calc_prob_detect_if_transit_with_actual_b(cat.target[t],1,p,sim_param)
if pdet>0.0
push!(pdetectlist,pdet)
end
end
end
idx = findall(x->x>0.0,pdetectlist)
pdetectlist[idx]
end
function test_catalog_constructors(sim_param::SimParam)
cat_phys = generate_kepler_physical_catalog(sim_param)::KeplerPhysicalCatalog
  id = findfirst( x->num_planets(x)>=1 , cat_phys.target) # fast forward to first target that has some planets
  @assert id != nothing  # findfirst returns nothing when no target has a planet
semimajor_axis(cat_phys.target[id].sys[1],1)
pdetlist = calc_prob_detect_list(cat_phys,sim_param)
calc_target_obs_single_obs(cat_phys.target[id],sim_param)
calc_target_obs_sky_ave(cat_phys.target[id],sim_param)
@assert( length(cat_phys.target[id].sys[1].planet) == num_planets(cat_phys.target[id]) )
cat_obs = simulated_read_kepler_observations(sim_param)
return (cat_phys, cat_obs)
end
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 2962 | ## ExoplanetsSysSim/src/koi_table.jl
## (c) 2015 Eric B. Ford
# Note this file is currently not used by SysSim.
# This functionality is now in kepler_catalog.jl
module KoiTable
using ExoplanetsSysSim
#using DataArrays
using DataFrames
using CSV
export setup_koi_table, koi_table, num_koi_for_kepid
df = DataFrame()
usable = Array{Int64}(undef,0)
default_koi_symbols_to_keep = [ :kepid, :kepoi_name, :koi_vet_stat, :koi_pdisposition, :koi_period, :koi_time0bk, :koi_duration, :koi_ingress, :koi_depth, :koi_ror, :koi_prad, :koi_srad, :koi_smass, :koi_steff, :koi_slogg, :koi_smet ] # :kepid included since the lookup functions below rely on it
function setup(sim_param::SimParam; force_reread::Bool = false, symbols_to_keep::Vector{Symbol} = default_koi_symbols_to_keep )
global df, usable
if haskey(sim_param,"read_koi_catalog") && !force_reread
return df
end
koi_catalog = joinpath(dirname(pathof(ExoplanetsSysSim)),"..", "data", get(sim_param,"koi_catalog","q1_q17_dr25_koi.csv") )
add_param_fixed(sim_param,"read_koi_catalog",true)
try
#df = readtable(koi_catalog)
#df = CSV.read(koi_catalog,nullable=true)
df = CSV.read(koi_catalog,allowmissing=:all)
catch
error(string("# Failed to read koi catalog >",koi_catalog,"<."))
end
  has_planet = .!(ismissing.(df[!,:koi_period]) .| ismissing.(df[!,:koi_time0bk]) .| ismissing.(df[!,:koi_duration]) .| ismissing.(df[!,:koi_depth]))
  has_star = .!ismissing.(df[!,:koi_srad])
  is_usable = has_planet .& has_star
  usable = findall(is_usable)
  df = df[usable, symbols_to_keep]   # keep only the rows and columns we will use
end
setup_koi_table(sim_param::SimParam) = setup(sim_param::SimParam)
function kepids_w_kois()
unique(df[:,:kepid])
end
function df_for_kepid(kepid::Integer)
df[df[:kepid].==kepid,:]
end
function num_koi(kepid::Integer)
sum(df[:kepid].==kepid)
end
function koi_by_kepid(kepid::Integer, plid::Integer, sym::Symbol)
kepid_idx = df[:kepid].==kepid
per_perm = sortperm(df[kepid_idx,:koi_period])
@assert( 1<= plid <= length(per_perm) )
df[kepid_idx,sym][per_perm[plid]]
end
function num_usable()
global usable
length(usable)
end
num_usable_in_koi_table() = num_usable()
function idx(i::Integer)
global usable
@assert( 1<=i<=length(usable) )
usable[i]
end
function koi_table(i::Integer, sym::Symbol)
global df, usable
@assert( 1<=i<=length(usable) )
return df[i,sym]
#return df[usable[i],sym]
end
function koi_table(i::Integer)
  global df
return df[i,:]
#return df[usable[i],:]
end
function koi_table(i::Integer, sym::Vector{Symbol})
global df, usable
@assert( 1<=i<=length(usable) )
return df[i,sym]
#return df[usable[i],sym]
end
function koi_table(i::Vector{Integer}, sym::Symbol)
global df, usable
return df[i,sym]
#return df[usable[i],sym]
end
function koi_table(i::Vector{Integer}, sym::Vector{Symbol})
global df, usable
return df[i,sym]
#return df[usable[i],sym]
end
end # module KoiTable
using ExoplanetsSysSim.KoiTable
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 5169 | ## ExoplanetsSysSim/src/limb_darkening.jl
## (c) 2018 Eric B. Ford
@compat abstract type LimbDarkeningParamAbstract end
struct LimbDarkeningParamLinear <: LimbDarkeningParamAbstract
coeff::Tuple{Float64}
# TODO SCI DETAIL: Replace with sensible limits on LD params
  LimbDarkeningParamLinear(_u1::Real ) = (!( (-2.5<=_u1<=2.5) )) ? error(string("Invalid linear limb darkening parameter: ",_u1)) : new((_u1,))
end
function depth_at_midpoint(radius_ratio::Float64, ld::LimbDarkeningParamLinear)
c0 = 1-sum(ld.coeff)
omega = c0/4+ld.coeff[1]/6
ksq = 1-radius_ratio^2
tmp0 = c0/4*ksq
tmp2 = ld.coeff[1]/6*ksq^(3//2)
return 1-(tmp0+tmp2)/omega
end
function ratio_from_depth(depth::Float64, ld::LimbDarkeningParamLinear)
c0 = 1-sum(ld.coeff)
omega = c0/4+ld.coeff[1]/6
xn = 1-depth
y0 = (1-depth)*omega
for i in 1:20
tmp0 = c0/4*xn
tmp2 = ld.coeff[1]/6*xn^(3//2)
fn = (tmp0+tmp2)-y0
tmp_0 = c0
tmp_2 = ld.coeff[1]*xn^(1//2)
f_n = 0.25*(tmp_0+tmp_2)
xn -= (fn/f_n)
     if abs(fn/f_n)/xn < 1e-8   # relative Newton step small => converged (abs needed since the step can be negative)
break
end
end
return sqrt(1-xn)
end
struct LimbDarkeningParamQuadratic <: LimbDarkeningParamAbstract
coeff::Tuple{Float64,Float64}
# TODO SCI DETAIL: Replace with sensible limits on LD params
LimbDarkeningParamQuadratic(_u1::Real, _u2::Real ) = (!( (-2.5<=_u1<=2.5) && (-2.5<=_u2<=2.5) )) ? error(string("Invalid quadratic limb darkening parameters: ",_u1," & ", _u2)) : new((_u1,_u2))
end
function depth_at_midpoint(radius_ratio::Float64, ld::LimbDarkeningParamQuadratic)
c0 = 1-sum(ld.coeff)
omega = c0/4+(ld.coeff[1]+2*ld.coeff[2])/6-ld.coeff[2]/8
ksq = 1-radius_ratio^2
tmp0 = c0/4*ksq
tmp2 = (ld.coeff[1]+2*ld.coeff[2])/6*ksq^(3//2)
tmp4 = -ld.coeff[2]/8*ksq^2
return 1-(tmp0+tmp2+tmp4)/omega
end
function ratio_from_depth(depth::Float64, ld::LimbDarkeningParamQuadratic)
c0 = 1-sum(ld.coeff)
omega = c0/4+(ld.coeff[1]+2*ld.coeff[2])/6-ld.coeff[2]/8
xn = 1-depth
y0 = (1-depth)*omega
for i in 1:20
tmp0 = c0/4*xn
tmp2 = (ld.coeff[1]+2*ld.coeff[2])/6*xn^(3//2)
tmp4 = -ld.coeff[2]/8*xn^2
fn = (tmp0+tmp2+tmp4)-y0
tmp_0 = c0
tmp_2 = (ld.coeff[1]+2*ld.coeff[2])*xn^(1//2)
tmp_4 = -ld.coeff[2]*xn
f_n = 0.25*(tmp_0+tmp_2+tmp_4)
xn -= (fn/f_n)
     if abs(fn/f_n)/xn < 1e-8   # relative Newton step small => converged (abs needed since the step can be negative)
break
end
end
return sqrt(1-xn)
end
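# Consistency check (added sketch, not in the original source): ratio_from_depth
# uses Newton iteration to invert depth_at_midpoint, so a round trip should
# recover the input radius ratio.
function example_limb_darkening_roundtrip()
  ld = LimbDarkeningParamQuadratic(0.4, 0.27)   # hypothetical Sun-like coefficients
  r = 0.05
  isapprox(ratio_from_depth(depth_at_midpoint(r, ld), ld), r; rtol=1e-6)
end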
struct LimbDarkeningParam4thOrder <: LimbDarkeningParamAbstract
coeff::Tuple{Float64,Float64,Float64,Float64}
# TODO SCI DETAIL: Replace with sensible limits on LD params
LimbDarkeningParam4thOrder(_c1::Real, _c2::Real, _c3::Real, _c4::Real ) = (!( (-2.5<=_c1<=2.5) && (-2.5<=_c2<=2.5) && (-2.5<=_c3<=2.5) && (-2.5<=_c4<=2.5) )) ? error(string("Invalid limb darkening parameters: ",_c1," , ", _c2, ", ",_c3," , ",_c4)) : new((_c1,_c2,_c3,_c4))
end
function depth_at_midpoint(radius_ratio::Float64, ld::LimbDarkeningParam4thOrder)
c0 = 1-sum(ld.coeff)
omega = c0/4+sum(ld.coeff./(5:8))
ksq = 1-radius_ratio^2
tmp0 = c0/4*ksq
tmp1 = ld.coeff[1]/5*ksq^(5//4)
tmp2 = ld.coeff[2]/6*ksq^(3//2)
tmp3 = ld.coeff[3]/7*ksq^(7//4)
tmp4 = ld.coeff[4]/8*ksq^2
return 1-(tmp0+tmp1+tmp2+tmp3+tmp4)/omega
end
# Cite Mandel & Agol 2002
# See https://arxiv.org/pdf/astro-ph/0210099.pdf and https://faculty.washington.edu/agol/mandel_agol_errata.pdf
# https://github.com/ericagol/ExoJulia/blob/master/ExoJulia/Transit/occultsmall.jl
function depth_small_planet(z::T,p::T,ld::LimbDarkeningParam4thOrder) where T<:Real
  c = ld.coeff   # assign before the asserts below use it
  @assert p<=0.2
  @assert sum(c)<=1
one_over_Omega_times_4 = 1/(1-c[1]/5-c[2]/3-3*c[3]/7-c[4]/2)
if z == zero(z)
F = p*p*one_over_Omega_times_4
elseif z <= 1-p
sig1=sqrt(sqrt(1-(z-p)^2))
sig2=sqrt(sqrt(1-(z+p)^2))
ave_surface_intensity=(1-c[1]*(1+(sig2^5-sig1^5)/(5*p*z))-c[2]*(1+(sig2^6-sig1^6)/(6*p*z))-c[3]*(1+(sig2^7-sig1^7)/(7*p*z))-c[4]*(p^2+z^2))
F = p*p*ave_surface_intensity*one_over_Omega_times_4
elseif z<1+p
x=1-(z-p)^2
tmp=1-c[1]*(1-0.8*x^(1//4))-c[2]*(1-2//3*sqrt(x))-c[3]*(1-4//7*x^(3//4))-c[4]*(1-0.5*x)
F = tmp*(p^2*acos((z-1)/p)-(z-1)*sqrt(p^2-(z-1)^2))*one_over_Omega_times_4/pi
else
F = zero(T)
end
return 1-F
end
function ratio_from_depth(depth::Float64, ld::LimbDarkeningParam4thOrder)
c0 = 1-sum(ld.coeff)
omega = c0/4+sum(ld.coeff./(5:8))
xn = 1-depth
y0 = (1-depth)*omega
for i in 1:20
tmp0 = c0/4*xn
tmp1 = ld.coeff[1]/5*xn^(5//4)
tmp2 = ld.coeff[2]/6*xn^(3//2)
tmp3 = ld.coeff[3]/7*xn^(7//4)
tmp4 = ld.coeff[4]/8*xn^2
fn = (tmp0+tmp1+tmp2+tmp3+tmp4)-y0
tmp_0 = c0
tmp_1 = ld.coeff[1]*xn^(1//4)
tmp_2 = ld.coeff[2]*xn^(1//2)
tmp_3 = ld.coeff[3]*xn^(3//4)
tmp_4 = ld.coeff[4]*xn
f_n = 0.25*(tmp_0+tmp_1+tmp_2+tmp_3+tmp_4)
xn -= (fn/f_n)
     if abs(fn/f_n)/xn < 1e-8   # relative Newton step small => converged (abs needed since the step can be negative)
break
end
end
return sqrt(1-xn)
end
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 1014 | ## ExoplanetsSysSim/src/orbit.jl
## (c) 2015 Eric B. Ford
#export Orbit
struct Orbit
P::Float64 # days # QUERY: Should we store P or a here?
ecc::Float64
incl::Float64 # radians; inclination relative to sky of observer
omega::Float64 # radians
asc_node::Float64 # radians
mean_anom::Float64 # radians # QUERY: Should we store t_0 or mean_anom here?
end
#Orbit() = Orbit(0.0,0.0,0.0,0.0,0.0,0.0) # Comment out, so don't accidentally have invalid orbits
# This would only work if Orbit were mutable. Is that better or worse? Let's test and see....
function set!(o::Orbit, P::Float64, e::Float64, i::Float64, w::Float64, asc_node::Float64, M::Float64)
o.P = P
o.ecc = e
o.incl = i
o.omega = w
o.asc_node = asc_node
o.mean_anom = M
return o
end
function test_orbit_constructors()
#orb = Orbit()
orb = Orbit(1.0, 0.03, 0.5*pi, 0.0, 0.0, pi)
if !isimmutable(orb)
set!(orb,1.0, 0.03, 0.5*pi, 0.0, 0.0, pi)
end
end
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 570 | ## ExoplanetsSysSim/src/planet.jl
## (c) 2015 Eric B. Ford
# export Planet
struct Planet
radius::Float64 # solar radii
mass::Float64 # solar masses
id::Int64 # id number (for purposes of tracking or grouping planets)
end
function Planet(radius::Float64, mass::Float64; id::Int64=0)
pl = Planet(radius, mass, id)
end
#Planet() = Planet(0.0, 0.0) # Commented out, so don't accidentally have invalid Planets
function test_planet_constructors(sim_param::SimParam)
#blank = Planet()
earth = Planet(0.0091705248,3e-6)
end
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 56764 | #using Distributions
#include("constants.jl")
#include("orbit.jl")
#include("planet.jl")
#if !@isdefined PlanetarySystemAbstract
@compat abstract type PlanetarySystemAbstract end
struct SystemPlane
incl::Float64 # radians; relative to sky plane
asc_node::Float64 # radians; relative to sky plane
end
struct PlanetarySystem{StarT<:StarAbstract} <: PlanetarySystemAbstract
star::StarT
planet::Vector{Planet}
orbit::Vector{Orbit}
system_plane::SystemPlane
# TODO DETAIL: Setup inner constructor to enforce equal number of planets & orbits
function PlanetarySystem{StarT}(s::StarT, p::AbstractVector{Planet}, o::AbstractVector{Orbit}, sp::SystemPlane) where {StarT<:StarAbstract}
@assert(length(p)==length(o)) # else error(string("Number of planets must match number of orbits: Np= ",length(p)," No= ",length(o)))
new(s,p,o,sp)
end
end
const PlanetarySystemSingleStar = PlanetarySystem{SingleStar}
#end
function PlanetarySystem(s::StarT) where {StarT<:StarAbstract}
PlanetarySystem(s,Vector{Planet}(undef,0),Vector{Orbit}(undef,0)) # Constructor for a Planetary System with no planets
end
function PlanetarySystem(s::StarT, p::Planet, o::Orbit) where {StarT<:StarAbstract}
PlanetarySystem(s,[p],[o],SystemPlane(0.,0.)) # Constructor for a single Planet System
end
function PlanetarySystem(s::StarT, p::AbstractVector{Planet}, o::AbstractVector{Orbit}) where {StarT<:StarAbstract}
PlanetarySystem{StarT}(s,p,o,SystemPlane(0.,0.)) # Constructor for a single Planet System
end
function PlanetarySystem(s::StarT, p::AbstractVector{Planet}, o::AbstractVector{Orbit}, sp::SystemPlane) where {StarT<:StarAbstract}
PlanetarySystem{StarT}(s,p,o,sp)
end
function PlanetarySystem(ps::PlanetarySystem{StarT}, keep::AbstractVector{Int64}) where {StarT<:StarAbstract} # Constructor that keeps only a subset of the planets
PlanetarySystem{StarT}(ps.star,ps.planet[keep],ps.orbit[keep],ps.system_plane) # NOTE: the inner constructor also requires the SystemPlane, so pass ps.system_plane through
end
function star( ps::PlanetarySystem{StarT} )::StarT where {StarT<:StarAbstract}
return ps.star
end
function planets( ps::PlanetarySystem{StarT} )::Vector{Planet} where {StarT<:StarAbstract}
return ps.planet
end
function orbits( ps::PlanetarySystem{StarT} )::Vector{Orbit} where {StarT<:StarAbstract}
return ps.orbit
end
#function PlanetarySystemSingleStar(ps::PlanetarySystemSingleStar, keep::Vector{Int64})
#function PlanetarySystem{StarT<:StarAbstract}(ps::PlanetarySystem{StarT}, keep::Vector{Int64})
# PlanetarySystem(ps.star,ps.planet[keep],ps.orbit[keep])
#end
flux(ps::PlanetarySystem{StarT}) where {StarT<:StarAbstract} = flux(star(ps))
#flux(ps::PlanetarySystem{Star}) = flux(ps.star)
#flux(ps::PlanetarySystem{BinaryStar}) = flux(ps.star)
#flux(ps::PlanetarySystem{MultipleStar}) = flux(ps.star)
function num_planets(s::PlanetarySystem{StarT}) where {StarT<:StarAbstract}
@assert( length(planets(s)) == length(orbits(s)) ) # TODO OPT: Deactivate inner assert's like this for speed once tested
return length(planets(s))
end
function calc_hill_sphere(a::Float64, mu::Float64)
return a*(mu/3)^(1//3)
end
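# Example (added illustration, not part of the original source): a minimal sketch
# checking calc_hill_sphere against the Earth-Sun system, where a = 1 AU and
# mu = M_earth/M_sun ~ 3e-6, so the Hill radius is (1e-6)^(1/3) = 0.01 AU.
function example_calc_hill_sphere()
    a = 1.0 # semi-major axis in AU
    mu = 3.0e-6 # planet-to-star mass ratio
    r_hill = calc_hill_sphere(a, mu) # ~0.01 AU
    @assert isapprox(r_hill, 0.01, rtol=1e-2)
    return r_hill
end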
function calc_mutual_hill_radii(ps::PlanetarySystem{StarT}, pl1::Int64, pl2::Int64) where StarT <: StarAbstract
mu = (ps.planet[pl1].mass + ps.planet[pl2].mass)/ps.star.mass
# NOTE: Orbit stores the period P rather than the semi-major axis, so convert first
a1 = semimajor_axis(ps.orbit[pl1].P, ps.planet[pl1].mass + ps.star.mass)
a2 = semimajor_axis(ps.orbit[pl2].P, ps.planet[pl2].mass + ps.star.mass)
a = 0.5*(a1 + a2)
return calc_hill_sphere(a, mu)
end
function test_stability_circular(P::AbstractVector{Float64}, mass::AbstractVector{Float64}, star_mass::Float64, sim_param::SimParam)
@assert length(P) == length(mass)
min_num_mutual_hill_radii = get_real(sim_param, "num_mutual_hill_radii")
found_instability = false
order = sortperm(P)
a2 = semimajor_axis(P[order[1]], mass[order[1]]+star_mass)
for pl in 1:(length(P)-1)
a1 = a2 # semimajor_axis(P[order[pl]], mass[order[pl]]+star_mass)
a2 = semimajor_axis(P[order[pl+1]], mass[order[pl+1]]+star_mass)
a = 0.5*(a1+a2)
mu = (mass[order[pl]] + mass[order[pl+1]])/star_mass
mutual_hill_radius = calc_hill_sphere(a, mu)
if a2-a1 < min_num_mutual_hill_radii*mutual_hill_radius
found_instability = true
break
end
end # loop over neighboring planet pairs within cluster
return !found_instability
end
function test_stability(P::AbstractVector{Float64}, mass::AbstractVector{Float64}, star_mass::Float64, sim_param::SimParam; ecc::AbstractVector{Float64}=zeros(length(P)))
@assert length(P) == length(mass) == length(ecc)
min_num_mutual_hill_radii = get_real(sim_param, "num_mutual_hill_radii")
found_instability = false
order = sortperm(P)
a2 = semimajor_axis(P[order[1]], mass[order[1]]+star_mass)
for pl in 1:(length(P)-1)
a1 = a2 # semimajor_axis(P[order[pl]], mass[order[pl]]+star_mass)
a2 = semimajor_axis(P[order[pl+1]], mass[order[pl+1]]+star_mass)
a = 0.5*(a1+a2)
mu = (mass[order[pl]] + mass[order[pl+1]])/star_mass
mutual_hill_radius = calc_hill_sphere(a, mu)
e1 = ecc[order[pl]]
e2 = ecc[order[pl+1]]
if a2*(1-e2)-a1*(1+e1) < min_num_mutual_hill_radii*mutual_hill_radius
found_instability = true
break
end
end # loop over neighboring planet pairs within cluster
return !found_instability
end
function is_period_ratio_near_resonance(period_ratio::Float64, sim_param::SimParam)
resonance_width = get_real(sim_param, "resonance_width")
resonance_width_factor = 1+resonance_width
period_ratios_to_check = get_any(sim_param, "period_ratios_mmr", Array{Float64,1})
result = false
for period_ratio_mmr in period_ratios_to_check
if period_ratio_mmr <= period_ratio <= period_ratio_mmr*resonance_width_factor
result = true
break
end
end
return result
end
function calc_if_near_resonance(P::AbstractVector{Float64}, sim_param::SimParam)
@assert issorted(P) # TODO: OPT: Could remove once know it is safe
result = falses(length(P))
if length(P) >= 2
for i in 1:(length(P)-1)
if is_period_ratio_near_resonance(P[i+1]/P[i], sim_param)
result[i] = true
result[i+1] = true
end # near mmr
end # planets
end # at least two planets
return result
end
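# Example (added illustration, not part of the original source): a minimal sketch
# of calc_if_near_resonance, assuming SimParam and add_param_fixed from this
# package are in scope. With a 5% resonance width and the 3:2 and 2:1 MMRs,
# the inner pair (ratio 2.02) is flagged while the outer pair (~2.48) is not.
function example_calc_if_near_resonance()
    sp = SimParam(Dict{String,Any}())
    add_param_fixed(sp, "resonance_width", 0.05)
    add_param_fixed(sp, "period_ratios_mmr", [1.5, 2.0])
    P = [1.0, 2.02, 5.0] # periods in days, already sorted
    return calc_if_near_resonance(P, sp) # expected: [true, true, false]
end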
# Code to generate simple planetary systems. For more sophisticated algorithms, see clustered model in He et al. 2019
function generate_planet_mass_from_radius_powerlaw(r::Float64, sim_param::SimParam)
mr_power_index::Float64 = get_real(sim_param,"mr_power_index")
mr_const::Float64 = get_real(sim_param,"mr_const")
mr_max_mass::Float64 = get_real(sim_param,"mr_max_mass")
m = mr_const*earth_mass*(r/earth_radius)^mr_power_index
if m > mr_max_mass
m = mr_max_mass
end
return m
end
function generate_planet_mass_from_radius_powerlaw(r::Float64, s::Star, o::Orbit, sim_param::SimParam) # TODO USER SCI: This is important if you are using a stability criterion. In that case, this should be replaced with a better M-R relationship. See Matthias's clustered model for an example.
generate_planet_mass_from_radius_powerlaw(r,sim_param)
end
function generate_num_planets_poisson(lambda::Real, max_planets::Integer; min_planets::Integer = 0)
##### Note: this function produces odd behaviour if lambda < min_planets due to bugs in Distributions.Truncated(); for example, Distributions.Truncated(Distributions.Poisson(lambda),min_planets,max_planets) returns only values >=2 if min_planets=1 and lambda<~0.95
if lambda < min_planets*1e-3
return min_planets
end
bug_fixed = false # TODO OPT: true case should work, but Danley found bug in Distributions package. Revert once fixed for speed.
local n
if bug_fixed
# d = Distributions.Truncated(Distributions.Poisson(lambda),min_planets,max_planets)
d = truncated(Distributions.Poisson(lambda),min_planets,max_planets)
n = rand(d)
else
if min_planets == 0
min_planets = -1
end
# d = Distributions.Truncated(Distributions.Poisson(lambda),min_planets,max_planets)
d = truncated(Distributions.Poisson(lambda),min_planets,max_planets)
n = rand(d)
#=
n = -1
while !(min_planets<=n<=max_planets)
n = rand(Distributions.Poisson(lambda))
end
=#
end
return n
end
function draw_truncated_poisson(lambda::Real; min::Integer=0, max::Integer=20, n::Integer=1)
pmf = [(lambda^k*exp(-lambda))/factorial(k) for k in min:max] # NOTE: factorial(k) overflows Int64 for k > 20, so keep max <= 20
pmf ./= sum(pmf)
cmf = cumsum(pmf) # cumulative distribution over min:max
result = Array{Int64}(undef,n)
for i in 1:n
u = rand()
result[i] = findfirst(x-> x>=u,cmf) + min -1
end
return result
end
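# Example (added illustration, not part of the original source): draw five counts
# from a Poisson(lambda=2) truncated to [1,10]; every value lies in that range
# by construction of the inverse-CDF lookup above.
function example_draw_truncated_poisson()
    counts = draw_truncated_poisson(2.0; min=1, max=10, n=5)
    @assert all(1 .<= counts .<= 10)
    return counts
end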
function generate_num_planets_poisson(s::Star, sim_param::SimParam)
lambda::Float64 = exp(get_real(sim_param,"log_eta_pl"))
max_tranets_in_sys::Int64 = get_int(sim_param,"max_tranets_in_sys")
generate_num_planets_poisson(lambda,max_tranets_in_sys)
end
function generate_period_and_sizes_log_normal(s::Star, sim_param::SimParam; num_pl::Integer = 1) # TODO USER SCI: User should make sure planetary properties are being drawn appropriately for their scientific purposes
mu_log_r::Float64 = get_real(sim_param,"mean_log_planet_radius")
sigma_log_r::Float64 = get_real(sim_param,"sigma_log_planet_radius")
mu_log_P::Float64 = get_real(sim_param,"mean_log_planet_period")
sigma_log_P::Float64 = get_real(sim_param,"sigma_log_planet_period")
min_period::Float64 = get_real(sim_param,"min_period")
max_period::Float64 = get_real(sim_param,"max_period")
min_radius::Float64 = get_real(sim_param,"min_radius")
max_radius::Float64 = get_real(sim_param,"max_radius")
max_draws::Int64 = 100
if sigma_log_r <= 0. || sigma_log_P<=0.
println("# mu_log_r= ", mu_log_r, " sigma_log_r= ", sigma_log_r, " mu_log_P= ", mu_log_P, " sigma_log_P= ", sigma_log_P)
end
rdist = LogNormal(mu_log_r,sigma_log_r)
Pdist = LogNormal(mu_log_P,sigma_log_P)
#Rlist = rand(rdist,num_pl)
#Plist = rand(Pdist,num_pl)
#idx_keep = find(i->(min_radius<=Rlist[i]<=max_radius) && (min_period<=Plist[i]<=max_period), 1:num_pl )
#return Plist[idx_keep], Rlist[idx_keep] # replaced because want to return exactly num_pl. Could use truncated to restore above code.
Rlist = zeros(num_pl)
Plist = zeros(num_pl)
for i in 1:num_pl
j = 0
while ! (min_radius<Rlist[i]<=max_radius) && j<max_draws
Rlist[i] = rand(rdist)
j+=1
end
if j>=max_draws
println("# Struggled to draw size for: ",mu_log_r, " ", sigma_log_r)
end
j = 0
while ! (min_period<Plist[i]<=max_period) && j<max_draws
Plist[i] = rand(Pdist)
j+=1
end
if j>=max_draws
println("# Struggled to draw period for: ",mu_log_P, " ", sigma_log_P)
end
end
return Plist, Rlist
end
function draw_power_law(n::Real, x0::Real, x1::Real, num_pl::Integer)
if n != -1
return ((x1^(n+1) - x0^(n+1)).*rand(num_pl) .+ x0^(n+1)).^(1/(n+1))
else #if n == -1
return x0*(x1/x0).^rand(num_pl)
end
end
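# Example (added illustration, not part of the original source): inverse-CDF
# sampling of periods distributed as P^n on [3,300] days with n = -1
# (log-uniform); the sample median should sit near the geometric mean,
# sqrt(3*300) = 30 days.
function example_draw_power_law()
    P = draw_power_law(-1.0, 3.0, 300.0, 10000)
    @assert all(3.0 .<= P .<= 300.0)
    Psorted = sort(P)
    return Psorted[div(end,2)] # sample median, ~30 days
end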
function draw_power_law!(out::Array, n::Real, x0::Real, x1::Real, num_pl::Integer)
@assert length(out)==num_pl
if n != -1
return out .= ((x1^(n+1) - x0^(n+1)).*rand(num_pl) .+ x0^(n+1)).^(1/(n+1))
else #if n == -1
return out .= x0*(x1/x0).^rand(num_pl)
end
end
function draw_broken_power_law(n1::Real, n2::Real, x0::Real, x1::Real, xb::Real, num_pl::Integer)
#x0 and x1 are the lower and upper truncation limits, and xb is the break point, i.e. x0 <= xb <= x1 (all must be positive)
#n1 and n2 are the power law indices between x0 and xb, and xb and x1, respectively (can be positive or negative)
@assert(x0 <= xb <= x1)
@assert(num_pl >= 1)
u_draws = rand(num_pl) #'num_pl' draws from the uniform distribution between 0 and 1
x_draws = zeros(num_pl)
if (n1 != -1) & (n2 != -1)
C1 = 1.0/(((xb^(n1+1) - x0^(n1+1))/(n1+1)) + ((xb^(n1-n2)*(x1^(n2+1) - xb^(n2+1)))/(n2+1))) #normalization constant
ub = (C1*(xb^(n1+1) - x0^(n1+1)))/(n1+1) #break point in u, between 0 and 1
for (i,u) in enumerate(u_draws)
if u <= ub
x_draws[i] = (((n1+1)*u)/C1 + x0^(n1+1))^(1/(n1+1))
else #if u > ub
x_draws[i] = (((n2+1)/(C1*xb^(n1-n2)))*(u - (C1*(xb^(n1+1) - x0^(n1+1)))/(n1+1)) + xb^(n2+1))^(1/(n2+1))
end
end
elseif (n1 == -1) & (n2 != -1)
C1 = 1.0/(log(xb/x0) + ((xb^(-1-n2))*(x1^(n2+1)) - 1)/(n2+1)) #normalization constant
ub = C1*log(xb/x0) #break point in u, between 0 and 1
for (i,u) in enumerate(u_draws)
if u <= ub
x_draws[i] = x0*exp(u/C1)
else #if u > ub
x_draws[i] = (((n2+1)/(C1*xb^(-1-n2)))*(u - C1*log(xb/x0)) + xb^(n2+1))^(1/(n2+1))
end
end
elseif (n1 != -1) & (n2 == -1)
C1 = 1.0/(((xb^(n1+1) - x0^(n1+1))/(n1+1)) + (xb^(n1+1))*log(x1/xb)) #normalization constant
ub = (C1*(xb^(n1+1) - x0^(n1+1)))/(n1+1) #break point in u, between 0 and 1
for (i,u) in enumerate(u_draws)
if u <= ub
x_draws[i] = (((n1+1)*u)/C1 + x0^(n1+1))^(1/(n1+1))
else #if u > ub
x_draws[i] = xb*exp((1/(C1*xb^(n1+1)))*(u - (C1*(xb^(n1+1) - x0^(n1+1)))/(n1+1)))
end
end
else #if n1 == -1 and n2 == -1 (i.e. it is a single power-law with index of -1)
for (i,u) in enumerate(u_draws)
x_draws[i] = x0*exp(u*log(x1/x0))
end
end
return x_draws
end
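# Example (added illustration, not part of the original source): a broken power
# law in planet radius rising as R^1 below a break at 3 Earth radii and falling
# as R^-2 above it, truncated to [0.5,10]; all draws respect the truncation.
function example_draw_broken_power_law()
    R = draw_broken_power_law(1.0, -2.0, 0.5, 10.0, 3.0, 1000)
    @assert all(0.5 .<= R .<= 10.0)
    return R
end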
function generate_periods_power_law(s::Star, sim_param::SimParam; num_pl::Integer = 1)
power_law_P::Float64 = get_real(sim_param,"power_law_P")
min_period::Float64 = get_real(sim_param,"min_period")
max_period::Float64 = get_real(sim_param,"max_period")
Plist = draw_power_law(power_law_P,min_period,max_period, num_pl)
return Plist
end
function generate_periods_broken_power_law(s::Star, sim_param::SimParam; num_pl::Integer = 1)
power_law_P1::Float64 = get_real(sim_param,"power_law_P1")
power_law_P2::Float64 = get_real(sim_param,"power_law_P2")
min_period::Float64 = get_real(sim_param,"min_period")
max_period::Float64 = get_real(sim_param,"max_period")
break_period::Float64 = get_real(sim_param,"break_period")
Plist = draw_broken_power_law(power_law_P1,power_law_P2,min_period,max_period,break_period, num_pl)
return Plist
end
function generate_sizes_power_law(s::Star, sim_param::SimParam; num_pl::Integer = 1)
power_law_r::Float64 = get_real(sim_param,"power_law_r")
min_radius::Float64 = get_real(sim_param,"min_radius")
max_radius::Float64 = get_real(sim_param,"max_radius")
Rlist = draw_power_law(power_law_r,min_radius,max_radius, num_pl)
return Rlist
end
function generate_sizes_broken_power_law(s::Star, sim_param::SimParam; num_pl::Integer = 1)
power_law_r1::Float64 = get_real(sim_param,"power_law_r1")
power_law_r2::Float64 = get_real(sim_param,"power_law_r2")
min_radius::Float64 = get_real(sim_param,"min_radius")
max_radius::Float64 = get_real(sim_param,"max_radius")
break_radius::Float64 = get_real(sim_param,"break_radius")
Rlist = draw_broken_power_law(power_law_r1,power_law_r2,min_radius,max_radius,break_radius, num_pl)
return Rlist
end
function generate_period_and_sizes_power_law(s::Star, sim_param::SimParam; num_pl::Integer = 1)
return (generate_periods_power_law(s, sim_param, num_pl=num_pl), generate_sizes_power_law(s, sim_param, num_pl=num_pl))
end
using SpecialFunctions
cdf_lognormal(x::Float64; μ::Float64=0., σ::Float64=1.) = 0.5*erfc(-(log(x) - μ)/(σ*sqrt(2.)))
function invert_cdf_lognormal(y::Float64; μ::Float64=0., σ::Float64=1.)
@assert 0<=y<=1
return exp(μ - sqrt(2.)*σ*erfcinv(2*y))
end
function cdf_power_law(x::Float64; x0::Float64, x1::Float64, α::Float64)
@assert x0 <= x <= x1
if α != -1
return (x^(α+1) - x0^(α+1))/(x1^(α+1) - x0^(α+1))
else # if α == -1
return log(x/x0)/log(x1/x0)
end
end
function invert_cdf_power_law(y::Float64; x0::Float64, x1::Float64, α::Float64)
@assert x0 < x1
if α != -1
return (x0^(α+1) + (x1^(α+1) - x0^(α+1))*y)^(1/(α+1))
else # if α == -1
return x0*(x1/x0)^y
end
end
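# Example (added illustration, not part of the original source): the two
# power-law helpers are exact inverses, so a round trip through the CDF
# recovers the input value.
function example_power_law_cdf_roundtrip()
    x0, x1, α = 3.0, 300.0, -1.5
    x = 42.0
    y = cdf_power_law(x; x0=x0, x1=x1, α=α)
    @assert isapprox(invert_cdf_power_law(y; x0=x0, x1=x1, α=α), x, rtol=1e-10)
    return y
end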
"""
draw_segmented_uniform(segments)
Draw a random uniform variable given segments in the interval (0,1).
# Arguments:
- `segments::Array{Tuple{Float64,Float64},1}`: array of segments (a,b) between 0 and 1.
NOTE: all segments must be between [0,1] and be non-overlapping!
# Returns:
A value (float) contained in one of the given segments, drawn uniformly.
"""
function draw_segmented_uniform(segments::Array{Tuple{Float64,Float64},1})
# Note: "segments" must be non-overlapping
n_segs = length(segments) # number of segments
@assert n_segs > 0
for (i,seg) in enumerate(segments)
@assert 0 <= seg[1] < seg[2] <= 1 # check that all segments are valid
if i != n_segs
@assert segments[i][2] < segments[i+1][1] # check to make sure there are no overlapping segments
end
end
seg_lengths = [seg[2]-seg[1] for seg in segments]
sum_lengths = sum(seg_lengths)
u_draw = rand()
for (i,seg) in enumerate(segments)
# Map the union of the segments uniformly to the domain [0,1]:
sum_l = sum(seg_lengths[1:i-1])
if sum_l <= u_draw*sum_lengths < sum_l + seg_lengths[i]
return rand(Uniform(seg[1], seg[2]))
end
end
end
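# Example (added illustration, not part of the original source): draw uniformly
# from the union (0.1,0.2) ∪ (0.6,0.9); draws land in the wider second segment
# roughly 3/4 of the time, in proportion to its length.
function example_draw_segmented_uniform()
    segs = [(0.1, 0.2), (0.6, 0.9)]
    u = draw_segmented_uniform(segs)
    @assert (0.1 <= u <= 0.2) || (0.6 <= u <= 0.9)
    return u
end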
"""
compute_unstable_regions_periods_given_planets(P, mass, insert_pl_mass, star_mass, sim_param; ecc=zeros(length(P)), insert_pl_ecc=0., use_mutualHill=true, verbose=false)
Compute the unstable intervals in orbital period given the existing planets and the mass and eccentricity of the planet we are trying to insert.
# Arguments:
- `P::AbstractVector{Float64}`: periods of the existing planets.
- `mass::AbstractVector{Float64}`: masses of the existing planets.
- `insert_pl_mass::Float64`: mass of the planet we are trying to insert.
- `star_mass::Float64`: mass of the star.
- `sim_param::SimParam`: a SimParam object containing various simulation parameters.
- `ecc::AbstractVector{Float64}=zeros(length(P))`: eccentricities of the existing planets (defaults to circular).
- `insert_pl_ecc::Float64=0.`: eccentricity of the planet we are trying to insert (defaults to circular).
- `use_mutualHill::Bool=true`: whether to use the minimum separation in terms of the mutual Hill radii (default) or the Hill radii (if false).
- `verbose::Bool=false`: whether to print various messages.
NOTE: the existing planets do not have to be sorted in period.
# Returns:
`P_segments_unstable::Array{Tuple{Float64,Float64},1}`: an array of segments. Inserting the planet somewhere inside one of these segments would deem the system unstable.
NOTE: these segments may be overlapping!
"""
function compute_unstable_regions_periods_given_planets(P::AbstractVector{Float64}, mass::AbstractVector{Float64}, insert_pl_mass::Float64, star_mass::Float64, sim_param::SimParam; ecc::AbstractVector{Float64}=zeros(length(P)), insert_pl_ecc::Float64=0., use_mutualHill::Bool=true, verbose::Bool=false)
@assert length(P) == length(mass) == length(ecc)
min_num_mutual_hill_radii = get_real(sim_param, "num_mutual_hill_radii")
order = sortperm(P)
P_segments_unstable = Array{Tuple{Float64,Float64},1}(undef,length(P))
for pl in 1:length(P)
a = semimajor_axis(P[order[pl]], mass[order[pl]]+star_mass)
if use_mutualHill
mu = (mass[order[pl]] + insert_pl_mass)/star_mass
term = (min_num_mutual_hill_radii/2.)*(mu/3.)^(1//3)
a_lower = a*(1. - ecc[order[pl]] - term)/(1. + insert_pl_ecc + term)
a_upper = (0 < 1-insert_pl_ecc-term) ? a*(1. + ecc[order[pl]] + term)/(1. - insert_pl_ecc - term) : Inf # 0<(1-insert_pl_ecc-term) must be true or else inequality flips sign and there are no physical solutions for a_upper
else # use Hill radii
mu = mass[order[pl]]/star_mass
hill_radius = calc_hill_sphere(a, mu)
a_lower, a_upper = a*(1. - ecc[order[pl]]) - hill_radius*min_num_mutual_hill_radii, a*(1. + ecc[order[pl]]) + hill_radius*min_num_mutual_hill_radii
end
a_lower = max(a_lower, 0.) # set a_lower=0 if negative
@assert 0 <= a_lower < a_upper
P_lower, P_upper = period_given_semimajor_axis(a_lower, insert_pl_mass+star_mass), period_given_semimajor_axis(a_upper, insert_pl_mass+star_mass)
if verbose
println("P_blocked: ", (P_lower, P_upper))
end
P_segments_unstable[pl] = (P_lower, P_upper)
end
return P_segments_unstable
end
"""
compute_unstable_mutualHill_regions_periodscales_given_clusters(pl_per_cl, P_cl, mass_cl, insert_cl_ρ, insert_cl_mass, star_mass, sim_param; ecc_cl=zeros(length(P_cl)), insert_cl_ecc=zeros(length(insert_cl_mass)), verbose=false)
Compute the unstable intervals in period scale given the existing clusters and the cluster we are trying to insert, requiring that no clusters overlap.
# Arguments:
- `pl_per_cl::Vector{Int64}`: number of planets in each existing cluster.
- `P_cl::AbstractVector{Float64}`: periods of the planets in the existing clusters.
- `mass_cl::AbstractVector{Float64}`: masses of the planets in the existing clusters.
- `insert_cl_ρ::AbstractVector{Float64}`: unscaled periods of the planets in the cluster we are trying to insert.
- `insert_cl_mass::AbstractVector{Float64}`: masses of the planets in the cluster we are trying to insert.
- `star_mass::Float64`: mass of the star.
- `sim_param::SimParam`: a SimParam object containing various simulation parameters.
- `ecc_cl::AbstractVector{Float64}=zeros(length(P_cl))`: eccentricities of the planets in the existing clusters (defaults to circular).
- `insert_cl_ecc::AbstractVector{Float64}=zeros(length(insert_cl_mass))`: eccentricities of the planets in the cluster we are trying to insert (defaults to circular).
- `verbose::Bool=false`: whether to print various messages.
NOTE: the existing clusters must be grouped (and in the order implied by `pl_per_cl`), but the planets do not have to be sorted in period.
# Returns:
`Pc_segments_unstable_nonoverlapping::Array{Tuple{Float64,Float64},1}`: an array of (non-overlapping) segments. Inserting the cluster somewhere inside one of these segments would either deem the system unstable, or cause clusters to overlap.
"""
function compute_unstable_mutualHill_regions_periodscales_given_clusters(pl_per_cl::Vector{Int64}, P_cl::AbstractVector{Float64}, mass_cl::AbstractVector{Float64}, insert_cl_ρ::AbstractVector{Float64}, insert_cl_mass::AbstractVector{Float64}, star_mass::Float64, sim_param::SimParam; ecc_cl::AbstractVector{Float64}=zeros(length(P_cl)), insert_cl_ecc::AbstractVector{Float64}=zeros(length(insert_cl_mass)), verbose::Bool=false)
n_cl = length(pl_per_cl)
@assert all(pl_per_cl .> 0)
@assert sum(pl_per_cl) == length(P_cl) == length(mass_cl) == length(ecc_cl)
@assert length(insert_cl_ρ) == length(insert_cl_mass) == length(insert_cl_ecc)
min_num_mutual_hill_radii = get_real(sim_param, "num_mutual_hill_radii")
# Get the inner-most and outer-most planets in the cluster we are trying to insert:
order_insert_cl = sortperm(insert_cl_ρ) # ρ denotes unscaled periods in this function
insert_pl_in_ρ, insert_pl_in_mass, insert_pl_in_ecc = insert_cl_ρ[order_insert_cl[1]], insert_cl_mass[order_insert_cl[1]], insert_cl_ecc[order_insert_cl[1]]
insert_pl_out_ρ, insert_pl_out_mass, insert_pl_out_ecc = insert_cl_ρ[order_insert_cl[end]], insert_cl_mass[order_insert_cl[end]], insert_cl_ecc[order_insert_cl[end]]
max_ratio = insert_pl_out_ρ/insert_pl_in_ρ
Pc_segments_unstable = Array{Tuple{Float64,Float64},1}(undef,n_cl)
pl_start = 1
pl_stop = 0
for c in 1:n_cl
pl_stop += pl_per_cl[c]
order = (pl_start - 1) .+ sortperm(P_cl[pl_start:pl_stop])
# Compare the inner-most planet in this cluster to the outer-most planet in the cluster we are trying to fit in to calculate the lower bound for the unstable region:
a_in = semimajor_axis(P_cl[order[1]], mass_cl[order[1]]+star_mass)
mu = (mass_cl[order[1]] + insert_pl_out_mass)/star_mass
term = (min_num_mutual_hill_radii/2.)*(mu/3.)^(1//3)
a_lower = a_in*(1. - ecc_cl[order[1]] - term)/(1. + insert_pl_out_ecc + term)
a_lower = max(a_lower, 0.) # set a_lower=0 if negative
P_lower = period_given_semimajor_axis(a_lower, insert_pl_out_mass+star_mass)
# Compare the outer-most planet in this cluster to the inner-most planet in the cluster we are trying to fit in to calculate the upper bound for the unstable region:
a_out = semimajor_axis(P_cl[order[end]], mass_cl[order[end]]+star_mass)
mu = (mass_cl[order[end]] + insert_pl_in_mass)/star_mass
term = (min_num_mutual_hill_radii/2.)*(mu/3.)^(1//3)
a_upper = (0 < 1-insert_pl_in_ecc-term) ? a_out*(1. + ecc_cl[order[end]] + term)/(1. - insert_pl_in_ecc - term) : Inf # 0<(1-insert_pl_ecc-term) must be true or else inequality flips sign and there are no physical solutions for a_upper
P_upper = period_given_semimajor_axis(a_upper, insert_pl_in_mass+star_mass)
# Now, convert P_lower and P_upper to bounds for the period scale (which depend on the unscaled periods of the cluster we are trying to insert):
Pc_lower, Pc_upper = P_lower/insert_pl_out_ρ, P_upper/insert_pl_in_ρ # Pc_lower should decrease if insert_pl_out_ρ > 1, and likewise Pc_upper should increase if insert_pl_in_ρ < 1
if verbose
println("Pc_blocked: ", (Pc_lower, Pc_upper))
end
Pc_segments_unstable[c] = (Pc_lower, Pc_upper)
pl_start += pl_per_cl[c]
end
Pc_segments_unstable = sort(Pc_segments_unstable)
Pc_segments_unstable_nonoverlapping = Tuple{Float64,Float64}[]
x_start, x_stop = Pc_segments_unstable[1]
for i in 1:length(Pc_segments_unstable)-1
if x_stop > Pc_segments_unstable[i+1][1] # segments overlap
x_stop = max(x_stop, Pc_segments_unstable[i+1][2]) # extend the merged segment, but never shrink it if the next segment is nested
else # segments do not overlap
if Pc_segments_unstable[i+1][1]/x_stop > max_ratio
push!(Pc_segments_unstable_nonoverlapping, (x_start, x_stop))
x_start, x_stop = Pc_segments_unstable[i+1]
else # segments do not overlap, but there is not enough room between the two segments to insert the cluster
x_stop = Pc_segments_unstable[i+1][2]
end
end
end
push!(Pc_segments_unstable_nonoverlapping, (x_start, x_stop))
if verbose
println("Pc_blocked with no room: ", Pc_segments_unstable_nonoverlapping)
end
return Pc_segments_unstable_nonoverlapping
end
"""
compute_regions_non_overlapping(segments_blocked)
Compute the non-overlapping segments given an array of (potentially overlapping) segments.
# Arguments:
- `segments_blocked::Array{Tuple{Float64,Float64},1}`: array of potentially overlapping segments.
NOTE: the input segments must be sorted in increasing order!
# Returns:
`segments_blocked_nonoverlapping::Array{Tuple{Float64,Float64},1}`: array of non-overlapping segments.
"""
function compute_regions_non_overlapping(segments_blocked::Array{Tuple{Float64,Float64},1})
# Make a list of non-overlapping segments:
segments_blocked_nonoverlapping = Tuple{Float64,Float64}[]
x_start, x_stop = segments_blocked[1]
for i in 1:length(segments_blocked)-1
if x_stop > segments_blocked[i+1][1] # segments overlap
x_stop = max(x_stop, segments_blocked[i+1][2]) # extend the merged segment, but never shrink it if the next segment is nested
else # segments do not overlap
push!(segments_blocked_nonoverlapping, (x_start, x_stop))
x_start, x_stop = segments_blocked[i+1]
end
end
push!(segments_blocked_nonoverlapping, (x_start, x_stop))
return segments_blocked_nonoverlapping
end
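# Example (added illustration, not part of the original source): merging sorted,
# overlapping segments; (0.1,0.3) and (0.2,0.4) collapse into (0.1,0.4) while
# the disjoint (0.6,0.7) survives unchanged.
function example_compute_regions_non_overlapping()
    segs = [(0.1, 0.3), (0.2, 0.4), (0.6, 0.7)]
    merged = compute_regions_non_overlapping(segs)
    @assert merged == [(0.1, 0.4), (0.6, 0.7)]
    return merged
end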
"""
compute_allowed_regions_cdf_lognormal(segments_blocked; μ=0., σ=1., x_min=0., x_max=Inf, ϵ=1e-12)
Compute the allowed regions in the cumulative distribution function (CDF) of a lognormal distribution, given blocked segments in the domain of the variable.
# Arguments:
- `segments_blocked::Array{Tuple{Float64,Float64},1}`: array of blocked segments.
- `μ::Float64=0.`: mean parameter for the lognormal distribution.
- `σ::Float64=1.`: standard deviation parameter for the lognormal distribution.
- `x_min::Float64=0.`: minimum value of the variable.
- `x_max::Float64=Inf`: maximum value of the variable.
- `ϵ::Float64=1e-12`: minimum segment length in CDF to be included as an allowed segment, to reduce the rate of error due to machine precision.
NOTE: the array of blocked segments do not have to be non-overlapping; this function will first compute the non-overlapping segments.
# Returns:
`cdf_segments_allowed::Array{Tuple{Float64,Float64},1}`: array of (non-overlapping) segments in the CDF that are not blocked.
NOTE: returns an empty array (length zero) if the entire domain between `x_min` and `x_max` is blocked!
"""
function compute_allowed_regions_cdf_lognormal(segments_blocked::Array{Tuple{Float64,Float64},1}; μ::Float64=0., σ::Float64=1., x_min::Float64=0., x_max::Float64=Inf, ϵ::Float64=1e-12)
@assert 0 <= x_min < x_max <= Inf
# Make a list of non-overlapping segments in period deemed unstable:
segments_blocked_nonoverlapping = compute_regions_non_overlapping(segments_blocked)
# Make a list of non-overlapping, allowed segments in the cdf:
cdf_segments_allowed = Tuple{Float64,Float64}[]
n_seg_max = length(segments_blocked_nonoverlapping)+1 # maximum number of allowed segments if all the unstable segments fit between x_min and x_max
for i in 1:n_seg_max
if i==1 && x_min < segments_blocked_nonoverlapping[i][1]
cdf_start = cdf_lognormal(x_min; μ=μ, σ=σ)
cdf_stop = cdf_lognormal(segments_blocked_nonoverlapping[i][1]; μ=μ, σ=σ)
if cdf_stop - cdf_start > ϵ
push!(cdf_segments_allowed, (cdf_start, cdf_stop))
end
elseif i==n_seg_max && x_max > segments_blocked_nonoverlapping[i-1][2]
cdf_start = cdf_lognormal(segments_blocked_nonoverlapping[i-1][2]; μ=μ, σ=σ)
cdf_stop = cdf_lognormal(x_max; μ=μ, σ=σ)
if cdf_stop - cdf_start > ϵ
push!(cdf_segments_allowed, (cdf_start, cdf_stop))
end
elseif 1 < i < n_seg_max
cdf_start = cdf_lognormal(segments_blocked_nonoverlapping[i-1][2]; μ=μ, σ=σ)
cdf_stop = cdf_lognormal(segments_blocked_nonoverlapping[i][1]; μ=μ, σ=σ)
if cdf_stop - cdf_start > ϵ
push!(cdf_segments_allowed, (cdf_start, cdf_stop))
end
end
end
return cdf_segments_allowed
end
"""
compute_allowed_regions_cdf_power_law(segments_blocked; x0, x1, α, ϵ=1e-12)
Compute the allowed regions in the cumulative distribution function (CDF) of a power-law distribution, given blocked segments in the domain of the variable.
# Arguments:
- `segments_blocked::Array{Tuple{Float64,Float64},1}`: array of blocked segments.
- `x0::Float64`: minimum value of the variable.
- `x1::Float64`: maximum value of the variable.
- `α::Float64`: power-law index.
- `ϵ::Float64=1e-12`: minimum segment length in CDF to be included as an allowed segment, to reduce the rate of error due to machine precision.
NOTE: the array of blocked segments do not have to be non-overlapping; this function will first compute the non-overlapping segments.
# Returns:
`cdf_segments_allowed::Array{Tuple{Float64,Float64},1}`: array of (non-overlapping) segments in the CDF that are not blocked.
NOTE: returns an empty array (length zero) if the entire domain between `x0` and `x1` is blocked!
"""
function compute_allowed_regions_cdf_power_law(segments_blocked::Array{Tuple{Float64,Float64},1}; x0::Float64, x1::Float64, α::Float64, ϵ::Float64=1e-12)
@assert x0 < x1
# Make a list of non-overlapping segments in period scale deemed unstable:
segments_blocked_nonoverlapping = compute_regions_non_overlapping(segments_blocked)
# Make a list of non-overlapping, allowed segments in the cdf:
cdf_segments_allowed = Tuple{Float64,Float64}[]
n_seg_max = length(segments_blocked_nonoverlapping)+1 # maximum number of allowed segments if all the unstable segments fit between x0 and x1
for i in 1:n_seg_max
if i==1 && x0 < segments_blocked_nonoverlapping[i][1]
cdf_start = 0.
cdf_stop = cdf_power_law(segments_blocked_nonoverlapping[i][1]; x0=x0, x1=x1, α=α)
if cdf_stop - cdf_start > ϵ
push!(cdf_segments_allowed, (cdf_start, cdf_stop))
end
elseif i==n_seg_max && x1 > segments_blocked_nonoverlapping[i-1][2]
cdf_start = cdf_power_law(segments_blocked_nonoverlapping[i-1][2]; x0=x0, x1=x1, α=α)
cdf_stop = 1.
if cdf_stop - cdf_start > ϵ
push!(cdf_segments_allowed, (cdf_start, cdf_stop))
end
elseif 1 < i < n_seg_max
cdf_start = cdf_power_law(segments_blocked_nonoverlapping[i-1][2]; x0=x0, x1=x1, α=α)
cdf_stop = cdf_power_law(segments_blocked_nonoverlapping[i][1]; x0=x0, x1=x1, α=α)
if cdf_stop - cdf_start > ϵ
push!(cdf_segments_allowed, (cdf_start, cdf_stop))
end
end
end
return cdf_segments_allowed
end
"""
draw_lognormal_allowed_regions(segments_blocked; μ=0., σ=1., x_min=0., x_max=Inf, ϵ=1e-12, verbose=false)
Draw a random value that is not inside a blocked region, from a lognormal distribution.
# Arguments:
- `segments_blocked::Array{Tuple{Float64,Float64},1}`: array of blocked segments.
- `μ::Float64=0.`: mean parameter for the lognormal distribution.
- `σ::Float64=1.`: standard deviation parameter for the lognormal distribution.
- `x_min::Float64=0.`: minimum value of the variable.
- `x_max::Float64=Inf`: maximum value of the variable.
- `ϵ::Float64=1e-12`: minimum segment length in CDF to be included as an allowed segment, to reduce the rate of error due to machine precision.
- `verbose::Bool=false`: whether to print various messages.
NOTE: the array of blocked segments do not have to be non-overlapping.
# Returns:
A value (float) drawn from the lognormal distribution, that is not inside a blocked region.
NOTE: returns NaN if the entire region between `x_min` and `x_max` is blocked!
"""
function draw_lognormal_allowed_regions(segments_blocked::Array{Tuple{Float64,Float64},1}; μ::Float64=0., σ::Float64=1., x_min::Float64=0., x_max::Float64=Inf, ϵ::Float64=1e-12, verbose::Bool=false)
cdf_segments_allowed = compute_allowed_regions_cdf_lognormal(segments_blocked; μ=μ, σ=σ, x_min=x_min, x_max=x_max, ϵ=ϵ)
if length(cdf_segments_allowed) == 0
if verbose
println("No allowed regions left to draw from; returning NaN.")
end
return NaN
else
y = draw_segmented_uniform(cdf_segments_allowed)
return invert_cdf_lognormal(y; μ=μ, σ=σ)
end
end
"""
draw_power_law_allowed_regions(segments_blocked; x0, x1, α, ϵ=1e-12, verbose=false)
Draw a random value that is not inside a blocked region, from a power-law distribution.
# Arguments:
- `segments_blocked::Array{Tuple{Float64,Float64},1}`: array of blocked segments.
- `x0::Float64`: minimum value of the variable.
- `x1::Float64`: maximum value of the variable.
- `α::Float64`: power-law index.
- `ϵ::Float64=1e-12`: minimum segment length in CDF to be included as an allowed segment, to reduce the rate of error due to machine precision.
- `verbose::Bool=false`: whether to print various messages.
NOTE: the array of blocked segments do not have to be non-overlapping.
# Returns:
A value (float) drawn from the power-law distribution, that is not inside a blocked region.
NOTE: returns NaN if the entire region between `x0` and `x1` is blocked!
"""
function draw_power_law_allowed_regions(segments_blocked::Array{Tuple{Float64,Float64},1}; x0::Float64, x1::Float64, α::Float64, ϵ::Float64=1e-12, verbose::Bool=false)
cdf_segments_allowed = compute_allowed_regions_cdf_power_law(segments_blocked; x0=x0, x1=x1, α=α, ϵ=ϵ)
if length(cdf_segments_allowed) == 0
if verbose
println("No allowed regions left to draw from; returning NaN.")
end
return NaN
else
y = draw_segmented_uniform(cdf_segments_allowed)
return invert_cdf_power_law(y; x0=x0, x1=x1, α=α)
end
end
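# Example (added illustration, not part of the original source): draw a period
# from a log-uniform law on [3,300] days while excluding a blocked strip around
# an existing planet; the draw never lands inside the blocked segment.
function example_draw_power_law_allowed_regions()
    blocked = [(20.0, 40.0)]
    P = draw_power_law_allowed_regions(blocked; x0=3.0, x1=300.0, α=-1.0)
    @assert (3.0 <= P <= 20.0) || (40.0 <= P <= 300.0)
    return P
end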
"""
draw_period_lognormal_allowed_regions(P, mass, insert_pl_mass, star_mass, sim_param; μ=0., σ=1., x_min=0., x_max=Inf, ecc=zeros(length(P)), insert_pl_ecc=0., use_mutualHill=true, ϵ=1e-12, verbose=false)
Draw a period from a lognormal distribution, that would allow the system to be stable if the planet is inserted there.
# Arguments:
- `P::AbstractVector{Float64}`: periods of the existing planets.
- `mass::AbstractVector{Float64}`: masses of the existing planets.
- `insert_pl_mass::Float64`: mass of the planet we are trying to insert.
- `star_mass::Float64`: mass of the star.
- `sim_param::SimParam`: a SimParam object containing various simulation parameters.
- `μ::Float64=0.`: mean parameter for the lognormal distribution.
- `σ::Float64=1.`: standard deviation parameter for the lognormal distribution.
- `x_min::Float64=0.`: minimum value of the variable.
- `x_max::Float64=Inf`: maximum value of the variable.
- `ecc::AbstractVector{Float64}=zeros(length(P))`: eccentricities of the existing planets (defaults to circular).
- `insert_pl_ecc::Float64=0.`: eccentricity of the planet we are trying to insert (defaults to circular).
- `use_mutualHill::Bool=true`: whether to use the minimum separation in terms of the mutual Hill radii (default) or the Hill radii (if false).
- `ϵ::Float64=1e-12`: minimum segment length in CDF to be included as an allowed segment, to reduce the rate of error due to machine precision.
- `verbose::Bool=false`: whether to print various messages.
NOTE: the existing planets do not have to be sorted in period.
# Returns:
`P_draw::Float64`: period drawn from the lognormal distribution, that would allow the system to be stable if the planet is inserted there.
NOTE: returns NaN if any period between `x_min` and `x_max` would make the system unstable!
"""
function draw_period_lognormal_allowed_regions(P::AbstractVector{Float64}, mass::AbstractVector{Float64}, insert_pl_mass::Float64, star_mass::Float64, sim_param::SimParam; μ::Float64=0., σ::Float64=1., x_min::Float64=0., x_max::Float64=Inf, ecc::AbstractVector{Float64}=zeros(length(P)), insert_pl_ecc::Float64=0., use_mutualHill::Bool=true, ϵ::Float64=1e-12, verbose::Bool=false)
@assert length(P) == length(mass) == length(ecc)
@assert 0 <= insert_pl_mass < star_mass
@assert all(P .> 0)
@assert all(mass .>= 0)
@assert all(1 .> ecc .>= 0)
@assert 0 <= insert_pl_ecc < 1
if length(P) > 0
P_segments_unstable = compute_unstable_regions_periods_given_planets(P, mass, insert_pl_mass, star_mass, sim_param; ecc=ecc, insert_pl_ecc=insert_pl_ecc, use_mutualHill=use_mutualHill, verbose=verbose)
P_draw = draw_lognormal_allowed_regions(P_segments_unstable; μ=μ, σ=σ, x_min=x_min, x_max=x_max, ϵ=ϵ, verbose=verbose)
else
P_draw = invert_cdf_lognormal(rand(Uniform(cdf_lognormal(x_min; μ=μ, σ=σ), cdf_lognormal(x_max; μ=μ, σ=σ))); μ=μ, σ=σ)
end
return P_draw
end
"""
draw_period_lognormal_allowed_regions_Hill(P, mass, insert_pl_mass, star_mass, sim_param; μ=0., σ=1., x_min=0., x_max=Inf, ecc=zeros(length(P)), insert_pl_ecc=0., ϵ=1e-12, verbose=false)
Draw a period that is not inside a blocked region using Hill radii, from a lognormal distribution, by calling `draw_period_lognormal_allowed_regions`.
NOTE: returns NaN if any period between `x_min` and `x_max` would make the system unstable!
"""
draw_period_lognormal_allowed_regions_Hill(P::AbstractVector{Float64}, mass::AbstractVector{Float64}, insert_pl_mass::Float64, star_mass::Float64, sim_param::SimParam; μ::Float64=0., σ::Float64=1., x_min::Float64=0., x_max::Float64=Inf, ecc::AbstractVector{Float64}=zeros(length(P)), insert_pl_ecc::Float64=0., ϵ::Float64=1e-12, verbose::Bool=false) = draw_period_lognormal_allowed_regions(P, mass, insert_pl_mass, star_mass, sim_param; μ=μ, σ=σ, x_min=x_min, x_max=x_max, ecc=ecc, insert_pl_ecc=insert_pl_ecc, use_mutualHill=false, ϵ=ϵ, verbose=verbose)
"""
draw_period_lognormal_allowed_regions_mutualHill(P, mass, insert_pl_mass, star_mass, sim_param; μ=0., σ=1., x_min=0., x_max=Inf, ecc=zeros(length(P)), insert_pl_ecc=0., ϵ=1e-12, verbose=false)
Draw a period that is not inside a blocked region using mutual Hill radii, from a lognormal distribution, by calling `draw_period_lognormal_allowed_regions`.
NOTE: returns NaN if any period between `x_min` and `x_max` would make the system unstable!
"""
draw_period_lognormal_allowed_regions_mutualHill(P::AbstractVector{Float64}, mass::AbstractVector{Float64}, insert_pl_mass::Float64, star_mass::Float64, sim_param::SimParam; μ::Float64=0., σ::Float64=1., x_min::Float64=0., x_max::Float64=Inf, ecc::AbstractVector{Float64}=zeros(length(P)), insert_pl_ecc::Float64=0., ϵ::Float64=1e-12, verbose::Bool=false) = draw_period_lognormal_allowed_regions(P, mass, insert_pl_mass, star_mass, sim_param; μ=μ, σ=σ, x_min=x_min, x_max=x_max, ecc=ecc, insert_pl_ecc=insert_pl_ecc, use_mutualHill=true, ϵ=ϵ, verbose=verbose)
"""
draw_periodscale_power_law_allowed_regions_mutualHill(pl_per_cl, P_cl, mass_cl, insert_cl_ρ, insert_cl_mass, star_mass, sim_param; x0, x1, α, ecc_cl=zeros(length(P_cl)), insert_cl_ecc=zeros(length(insert_cl_mass)), ϵ=1e-12, verbose=false)
Draw a period scale from a power-law distribution, that would allow the system to be stable if the cluster is inserted there (and cause no overlapping clusters).
# Arguments:
- `pl_per_cl::Vector{Int64}`: number of planets in each existing cluster.
- `P_cl::AbstractVector{Float64}`: periods of the planets in the existing clusters.
- `mass_cl::AbstractVector{Float64}`: masses of the planets in the existing clusters.
- `insert_cl_ρ::AbstractVector{Float64}`: unscaled periods of the planets in the cluster we are trying to insert.
- `insert_cl_mass::AbstractVector{Float64}`: masses of the planets in the cluster we are trying to insert.
- `star_mass::Float64`: mass of the star.
- `sim_param::SimParam`: a SimParam object containing various simulation parameters.
- `x0::Float64`: minimum value of the variable.
- `x1::Float64`: maximum value of the variable.
- `α::Float64`: power-law index.
- `ecc_cl::AbstractVector{Float64}=zeros(length(P_cl))`: eccentricities of the planets in the existing clusters (defaults to circular).
- `insert_cl_ecc::AbstractVector{Float64}=zeros(length(insert_cl_mass))`: eccentricities of the planets in the cluster we are trying to insert (defaults to circular).
- `ϵ::Float64=1e-12`: minimum segment length in CDF to be included as an allowed segment, to reduce the rate of error due to machine precision.
- `verbose::Bool=false`: whether to print various messages.
NOTE: the existing clusters must be grouped (and in the order implied by `pl_per_cl`), but the planets do not have to be sorted in period.
# Returns:
`Pc_draw::Float64`: period scale drawn from the power-law distribution, that would allow the system to be stable if the cluster is inserted there.
NOTE: returns NaN if any period scale between `x0` and `x1` would make the system unstable or cause clusters to overlap!
"""
function draw_periodscale_power_law_allowed_regions_mutualHill(pl_per_cl::Vector{Int64}, P_cl::AbstractVector{Float64}, mass_cl::AbstractVector{Float64}, insert_cl_ρ::AbstractVector{Float64}, insert_cl_mass::AbstractVector{Float64}, star_mass::Float64, sim_param::SimParam; x0::Float64, x1::Float64, α::Float64, ecc_cl::AbstractVector{Float64}=zeros(length(P_cl)), insert_cl_ecc::AbstractVector{Float64}=zeros(length(insert_cl_mass)), ϵ::Float64=1e-12, verbose::Bool=false)
@assert x0 < x1
@assert sum(pl_per_cl) == length(P_cl) == length(mass_cl) == length(ecc_cl)
@assert all(P_cl .> 0)
@assert all(mass_cl .> 0)
@assert all(0 .<= ecc_cl .< 1)
@assert length(insert_cl_ρ) == length(insert_cl_mass) == length(insert_cl_ecc)
@assert all(0 .< insert_cl_ρ)
@assert all(0 .< insert_cl_mass .< star_mass)
@assert all(0 .<= insert_cl_ecc .< 1)
if length(P_cl) > 0
Pc_segments_unstable = compute_unstable_mutualHill_regions_periodscales_given_clusters(pl_per_cl, P_cl, mass_cl, insert_cl_ρ, insert_cl_mass, star_mass, sim_param; ecc_cl=ecc_cl, insert_cl_ecc=insert_cl_ecc, verbose=verbose)
Pc_draw = draw_power_law_allowed_regions(Pc_segments_unstable; x0=x0, x1=x1, α=α, ϵ=ϵ, verbose=verbose)
else
Pc_draw = invert_cdf_power_law(rand(); x0=x0, x1=x1, α=α)
end
return Pc_draw
end
#=
# Removed now that Truncated is deprecated in favor of truncated
function TruncatedUpper(d::Distributions.UnivariateDistribution, u::Float64)
zero(u) < u || error("lower bound should be less than upper bound.")
lcdf = zero(u)
ucdf = isinf(u) ? one(u) : cdf(d, u)
tp = ucdf - lcdf
Distributions.Truncated{typeof(d),Distributions.value_support(typeof(d))}(d, zero(u), u, lcdf, ucdf, tp, log(tp))
end
=#
function generate_e_omega_rayleigh_direct(sigma_hk::Float64; max_e::Float64 = 1.0)
@assert(0<max_e<=1.0)
# ecc::Float64 = rand( TruncatedUpper(Rayleigh(sigma_hk),max_e) )
ecc::Float64 = rand( truncated(Rayleigh(sigma_hk),zero(sigma_hk),max_e) )
w::Float64 = 2pi*rand()
return ecc, w
end
function generate_e_omega_rayleigh_two_gaussians(sigma_hk::Float64; max_e::Float64 = 1.0)
@assert(0<max_e<=1.0)
h = k = 1.0
while h*h+k*k >= max_e*max_e
h = sigma_hk*randn()
k = sigma_hk*randn()
end
ecc::Float64 = sqrt(h*h+k*k)
#w::Float64 = atan2(k,h)
w::Float64 = atan(k,h)
return ecc, w
end
function generate_e_omega_rayleigh(sigma_hk::Float64; max_e::Float64 = 1.0)
if max_e > sigma_hk
return generate_e_omega_rayleigh_two_gaussians(sigma_hk,max_e=max_e)
else
return generate_e_omega_rayleigh_direct(sigma_hk,max_e=max_e)
end
end
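# Example (added illustration, not part of the original source): draw an
# eccentricity and argument of pericenter with Rayleigh scale sigma_hk = 0.03,
# capped at max_e = 0.3. Since max_e > sigma_hk this takes the two-Gaussian
# branch, whose omega = atan(k,h) lies in (-pi, pi].
function example_generate_e_omega_rayleigh()
    ecc, w = generate_e_omega_rayleigh(0.03, max_e=0.3)
    @assert 0.0 <= ecc < 0.3
    @assert -pi <= w <= pi
    return (ecc, w)
end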
function generate_e_omega_rayleigh(sim_param::SimParam; max_e::Float64 = 1.0)
sigma_hk::Float64 = get_real(sim_param,"sigma_hk")
generate_e_omega_rayleigh(sigma_hk, max_e=max_e)
end
struct ParamsTriangle
id_xy::Tuple{Int64,Int64} # indices of the two transformed parameters in a vector of parameters
# A,B,C are vertices of the triangle for parameters (x,y), in no particular order
A::Tuple{Float64,Float64}
B::Tuple{Float64,Float64}
C::Tuple{Float64,Float64}
end
"""
map_square_to_triangle(r1, r2, pt)
Map a point (r1, r2) in the unit square to a point P=(x,y) in the triangle defined by vertices A,B,C. If r1,r2 are uniformly drawn in [0,1], then the point P is also uniformly drawn in the triangle; see http://www.cs.princeton.edu/~funk/tog02.pdf (Section 4.2) for a reference.
# Arguments:
- `r1r2::Tuple{Float64,Float64}`: a point in the unit square representing two transformed parameters.
- `pt::ParamsTriangle`: an object containing the indices of the two transformed parameters and vertices of the triangle defining the bounds for the two parameters.
# Returns:
- `P::Tuple{Float64,Float64}`: a point for the two parameters in the triangle corresponding to the point given by `r1r2` in the unit square.
"""
function map_square_to_triangle(r1r2::Tuple{Float64,Float64}, pt::ParamsTriangle)
@assert all(0 .<= r1r2 .<= 1)
r1, r2 = r1r2
P = (1. - sqrt(r1)) .* pt.A .+ (sqrt(r1)*(1. - r2)) .* pt.B .+ (sqrt(r1)*r2) .* pt.C
return P
end
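# Example (added illustration, not part of the original source): map points of
# the unit square into the triangle with vertices A=(0,0), B=(1,0), C=(0,1);
# r1 = 0 always returns vertex A, and every mapped point stays inside the
# triangle since the three barycentric weights are non-negative and sum to 1.
function example_map_square_to_triangle()
    pt = ParamsTriangle((1, 2), (0.0, 0.0), (1.0, 0.0), (0.0, 1.0))
    @assert map_square_to_triangle((0.0, 0.5), pt) == (0.0, 0.0) # r1 = 0 -> A
    P = map_square_to_triangle((rand(), rand()), pt)
    @assert 0.0 <= P[1] && 0.0 <= P[2] && P[1] + P[2] <= 1.0 + 1e-12
    return P
end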
function generate_planetary_system_hardcoded_example(star::StarAbstract, sim_param::SimParam; verbose::Bool = false)
# in this version we specify fixed functions that are known at compile time, allowing for additional optimizations (~0.6 second faster per Kepler catalog out of ~3.6 sec on my laptop w/ 1 core)
generate_planet_mass_from_radius = generate_planet_mass_from_radius_powerlaw
generate_num_planets = generate_num_planets_poisson
generate_period_and_sizes = generate_period_and_sizes_log_normal
generate_e_omega = generate_e_omega_rayleigh
# generate_star = get_function(sim_param,"generate_star")
# star::StarAbstract = generate_star(sim_param)
num_pl::Int64 = generate_num_planets(star, sim_param)
if( num_pl==0 )
return PlanetarySystem(star)
else
(Plist::Vector{Float64}, Rlist::Vector{Float64}) = generate_period_and_sizes(star, sim_param, num_pl=num_pl)
idx = sortperm(Plist) # TODO OPT: Check whether sorting is a significant time sink. If so, it could be deferred
min_a_in_rstar = 2.0
min_P_orbit = day_in_year*sqrt((min_a_in_rstar*star.radius*rsol_in_au)^3 / star.mass) # minimum orbital period corresponding to a semi-major axis of two stellar radii
idx = idx[Plist[idx] .> min_P_orbit]
if( length(idx)==0 )
return PlanetarySystem(star)
end
pl = Array{Planet}(undef,length(idx))
orbit = Array{Orbit}(undef,length(idx))
a = map(i->semimajor_axis(Plist[i],star.mass),idx)
max_e = ones(length(idx))
max_e_factor = 0.999 # A factor just less than 1 to prevent numerical issues with near-crossing orbits
if length(a)>=2
for i in 1:length(a)
if i==1
max_e[i] = max_e_factor*(1-a[i]/a[i+1])/(1+a[i]/a[i+1])
elseif i==length(a)
max_e[i] = max_e_factor*(1-a[i-1]/a[i])/(1+a[i-1]/a[i])
else
max_e[i] = max_e_factor*min( (1-a[i]/a[i+1])/(1+a[i]/a[i+1]), (1-a[i-1]/a[i])/(1+a[i-1]/a[i]) )
end
end
end
for i in 1:length(idx)
# if verbose println("i=",i," idx=",idx," Plist=",Plist[idx] ); end
P = Plist[idx[i]]
Rpl = Rlist[idx[i]]
(ecc::Float64, omega::Float64) = generate_e_omega(sim_param, max_e=max_e[i])
incl::Float64 = acos(rand())
orbit[i] = Orbit(P,ecc,incl,omega,2pi*rand(),2pi*rand())
mass::Float64 = generate_planet_mass_from_radius(Rpl, star, orbit[i], sim_param)
pl[i] = Planet( Rpl, mass )
end
return PlanetarySystem(star,pl,orbit)
end
end
function generate_planetary_system_empty(star::StarAbstract, sim_param::SimParam; verbose::Bool = false)
return PlanetarySystem(star)::PlanetarySystem
end
function generate_planetary_system_uncorrelated_incl(star::StarAbstract, sim_param::SimParam; verbose::Bool = false)
# load functions to use for drawing parameters
generate_planet_mass_from_radius = get_function(sim_param,"generate_planet_mass_from_radius")
generate_num_planets = get_function(sim_param,"generate_num_planets")
generate_period_and_sizes = get_function(sim_param,"generate_period_and_sizes")
generate_e_omega = get_function(sim_param,"generate_e_omega")
# generate_star = get_function(sim_param,"generate_star")
# star::StarAbstract = generate_star(sim_param)
num_pl::Int64 = generate_num_planets(star, sim_param)::Int64
sigma_ecc::Float64 = haskey(sim_param,"sigma_hk") ? get_real(sim_param,"sigma_hk") : 0.0
if( num_pl==0 )
return PlanetarySystem(star)::PlanetarySystem
else
(Plist::Vector{Float64}, Rlist::Vector{Float64}) = generate_period_and_sizes(star, sim_param, num_pl=num_pl)
idx = sortperm(Plist) # TODO OPT: Check whether sorting is a significant time sink. If so, it could be deferred
min_a_in_rstar = 2.0
min_P_orbit = day_in_year*sqrt((min_a_in_rstar*star.radius*rsol_in_au)^3 / star.mass) # minimum orbital period corresponding to a semi-major axis of two stellar radii
idx = idx[Plist[idx] .> min_P_orbit]
if( length(idx)==0 )
return PlanetarySystem(star)
end
pl = Array{Planet}(undef,length(idx))
orbit = Array{Orbit}(undef,length(idx))
a = map(i->semimajor_axis(Plist[i],star.mass),idx)
max_e = ones(length(idx))
max_e_factor = 0.999 # A factor just less than 1 to prevent numerical issues with near-crossing orbits
if length(a)>=2
for i in 1:length(a)
if i==1
max_e[i] = max_e_factor*(1-a[i]/a[i+1])/(1+a[i]/a[i+1])
elseif i==length(a)
max_e[i] = max_e_factor*(1-a[i-1]/a[i])/(1+a[i-1]/a[i])
else
max_e[i] = max_e_factor*min( (1-a[i]/a[i+1])/(1+a[i]/a[i+1]), (1-a[i-1]/a[i])/(1+a[i-1]/a[i]) )
end
end
end
for i in 1:length(idx)
# if verbose println("i=",i," idx=",idx," Plist=",Plist[idx] ); end
P = Plist[idx[i]]
Rpl = Rlist[idx[i]]
if haskey(sim_param,"sigma_hk_one") && haskey(sim_param,"sigma_hk_multi")
sigma_ecc = num_pl == 1 ? get_real(sim_param,"sigma_hk_one") : get_real(sim_param,"sigma_hk_multi")
end
(ecc::Float64, omega::Float64) = generate_e_omega(sim_param, max_e=max_e[i])
incl::Float64 = acos(rand())
orbit[i] = Orbit(P,ecc,incl,omega,2pi*rand(),2pi*rand())
# set!(orbit[idx[i]],P,ecc,incl,omega,2pi*rand(),2pi*rand())
mass::Float64 = generate_planet_mass_from_radius(Rpl, star, orbit[i], sim_param)
pl[i] = Planet( Rpl, mass )
end
return PlanetarySystem(star,pl,orbit)
end
end
# This version generates more systems roughly near a common plane, but until we incorporate CORBITS data, ABC can't match the input parameters
function generate_planetary_system_simple(star::StarAbstract, sim_param::SimParam; verbose::Bool = false)
# load functions to use for drawing parameters
generate_planet_mass_from_radius = get_function(sim_param,"generate_planet_mass_from_radius")
generate_num_planets = get_function(sim_param,"generate_num_planets")
# generate_num_planets = generate_num_planets_christiansen
# generate_period_and_sizes = generate_period_and_sizes_christiansen
generate_period_and_sizes = get_function(sim_param,"generate_period_and_sizes")
generate_e_omega = get_function(sim_param,"generate_e_omega")
sigma_incl = deg2rad(get_real(sim_param,"sigma_incl"))
# generate_star = get_function(sim_param,"generate_star")
# star::StarAbstract = generate_star(sim_param)
num_pl = generate_num_planets(star, sim_param)::Int64
sigma_ecc::Float64 = haskey(sim_param,"sigma_hk") ? get_real(sim_param,"sigma_hk") : 0.0
if( num_pl==0 )
return PlanetarySystem(star)
else
(Plist::Vector{Float64}, Rlist::Vector{Float64}) = generate_period_and_sizes(star, sim_param, num_pl=num_pl)
idx = sortperm(Plist) # TODO OPT: Check whether sorting is a significant time sink. If so, it could be deferred
incl_sys = acos(rand())
min_a_in_rstar = 2.0
min_P_orbit = day_in_year*sqrt((min_a_in_rstar*star.radius*rsol_in_au)^3 / star.mass) # minimum orbital period corresponding to a semi-major axis of two stellar radii
idx = idx[Plist[idx] .> min_P_orbit]
if( length(idx)==0 )
return PlanetarySystem(star)
end
pl = Array{Planet}(undef,length(idx))
orbit = Array{Orbit}(undef,length(idx))
for i in 1:length(idx)
# if verbose println("i=",i," idx=",idx," Plist=",Plist[idx] ); end
P = Plist[idx[i]]
Rpl = Rlist[idx[i]]
if haskey(sim_param,"sigma_hk_one") && haskey(sim_param,"sigma_hk_multi")
sigma_ecc = num_pl == 1 ? get_real(sim_param,"sigma_hk_one") : get_real(sim_param,"sigma_hk_multi")
end
(ecc, omega) = generate_e_omega(sim_param)::Tuple{Float64,Float64}
incl_mut = sigma_incl*sqrt(randn()^2+randn()^2) # rand(Distributions.Rayleigh(sigma_incl)) # sigma_incl*randn()
asc_node = 2pi*rand()
mean_anom = 2pi*rand()
#incl = incl_sys + sigma_incl*randn()
incl = incl_mut!=zero(incl_mut) ? acos( cos(incl_sys)*cos(incl_mut) + sin(incl_sys)*sin(incl_mut)*cos(asc_node) ) : incl_sys
orbit[i] = Orbit(P,ecc,incl,omega,asc_node,mean_anom)
mass = generate_planet_mass_from_radius(Rpl, star, orbit[i], sim_param)::Float64
pl[i] = Planet( Rpl, mass )
end
return PlanetarySystem(star,pl,orbit)
end
end
function test_planetary_system_constructors(sim_param::SimParam)
generate_star = get_function(sim_param,"generate_star")
star = generate_star(sim_param)
empty_sys = PlanetarySystem(star)
earth = Planet(earth_radius,earth_mass)
earth_orbit = Orbit(365.2425,0.0167,0.5*pi,0.0,0.0,0.0)
solar_sys = PlanetarySystem(star, earth,earth_orbit)
m = generate_planet_mass_from_radius_powerlaw(0.02,star,earth_orbit,sim_param)/earth_mass
generate_planetary_system_simple(star,sim_param,verbose=true)
end
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 1539 | ## ExoplanetsSysSim/src/setup.jl
## (c) 2015 Eric B. Ford
if VERSION >=v"0.7.0-"
using Pkg
#using Libdl
end
# How to install ExoplanetsSysSim package
# Pkg.add(PackageSpec(url="[email protected]:eford/ExoplanetsSysSim.jl.git"))
#=
# Is this still needed, now that we've moved to github?
# If so, this would need to be updated for Julia v0.7.0
# Since bitbucket messed up capitalization of package name
if ! isdir( joinpath(Pkg.devdir(),"ExoplanetsSysSim") )
symlink( joinpath(Pkg.dir(),"exoplanetssyssim"), joinpath(Pkg.dir(),"ExoplanetsSysSim") )
end
=#
try
Pkg.add(PackageSpec(url="[email protected]:eford/ABC.jl.git"))
catch
warn("Attempted to install ABC.jl package, but was not successful.")
warn("While most of SysSim will still work, some functionality will not be avaliable unless you install ABC correctly.")
end
try
Pkg.add(PackageSpec(url="[email protected]:jbrakensiek/CORBITS.git"))
# Compile CORBITS library and put it somewhere we can find
cd(joinpath(Pkg.devdir(),"CORBITS"))
run(`make lib`)
cd(homedir())
if !Sys.iswindows() # is_windows() was renamed to Sys.iswindows() in Julia 0.7
symlink( joinpath(Pkg.devdir(),"CORBITS","libcorbits.so"), joinpath(Pkg.devdir(),"ExoplanetsSysSim","libcorbits.so") )
else
cp( joinpath(Pkg.devdir(),"CORBITS","libcorbits.so"), joinpath(Pkg.devdir(),"ExoplanetsSysSim","libcorbits.so") )
end
catch
warn("Attempted to install CORBITS.jl package, but was not successful.")
warn("While most of SysSim will still work, some functionality will not be avaliable unless you install CORBITS correctly.")
end
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 12338 | ## ExoplanetsSysSim.jl
## (c) 2015 Eric B. Ford
module SimulationParameters
import Compat: @compat #, readstring
import LibGit2
using Pkg
using ExoplanetsSysSim
export SimParam, add_param_fixed, add_param_active, update_param, set_active, set_inactive, is_active
export get_any, get_real, get_int, get_bool, get_function
export make_vector_of_active_param_keys, make_vector_of_sim_param, get_range_for_sim_param, update_sim_param_from_vector!
export setup_sim_param_demo, test_sim_param_constructors
#export preallocate_memory!
const global julia_version_pair = ("version_julia",string(VERSION))
function package_version_or_head(m::Module)
try
repo = LibGit2.GitRepo(dirname(pathof(m)))
return LibGit2.headname(repo)
catch
#return Pkg.installed()[string(m)]
@warn "Can't find headname of repo for " * string(m)
end
end
function package_version_or_head(m::String)
[(name=d.name,version=d.version) for d in values(Pkg.dependencies()) if d.is_direct_dep ]
end
function pkg_installed()
collect(skipmissing(map(d->d.is_direct_dep ? (name=d.name,version=d.version) : missing,values(Pkg.dependencies()))))
end
mutable struct SimParam
param::Dict{String,Any}
active::Dict{String,Bool}
end
copy(p::SimParam) = SimParam(copy(p.param),copy(p.active))
"""
SimParam(p::Dict{String,Any})
Creates a SimParam object from the dictionary p, with all parameter defaulting to inactive.
"""
function SimParam(p::Dict{String,Any}) # By default all parameters are set as inactive (i.e., not allowed to be optimized)
a = Dict{String,Bool}()
for k in keys(p)
a[k] = false
end
return SimParam(p,a)
end
function SimParam()
#d = Dict{String,Any}([julia_version_pair, ("Pkg.installed",Pkg.installed())])
d = Dict{String,Any}([julia_version_pair, ("Pkg.installed",pkg_installed())])
return SimParam(d)
end
"Update SimParam() to define current state at run time, and not precompile time"
function __init__()
"""
SimParam()
Creates a nearly empty SimParam object, with just the version id and potentially other information about the code, system, runtime, etc.
"""
function SimParam()
#d = Dict{String,Any}([ julia_version_pair, ("hostname",gethostname()), ("time",time()), ("Pkg.installed",Pkg.installed()) ])
d = Dict{String,Any}([ julia_version_pair, ("hostname",gethostname()), ("time",time()), ("Pkg.installed",pkg_installed()) ])
SimParam(d)
end
end
"""
add_param_fixed(sim::SimParam, key::String,val::Any)
Adds (or overwrites) key with value val to the SimParam object, sim, and sets the parameter set to inactive.
"""
function add_param_fixed(sim::SimParam, key::String,val::Any)
sim.param[key] = val
sim.active[key] = false
end
"""
### add_param_active(sim::SimParam, key::String,val::Any)
Adds (or overwrites) key with value val to the SimParam object, sim, and sets the parameter set to active.
"""
function add_param_active(sim::SimParam, key::String,val::Any)
sim.param[key] = val
sim.active[key] = true
end
"""
### update_param(sim::SimParam, key::String,val::Any)
Overwrites key with value val to the SimParam object, sim
"""
function update_param(sim::SimParam, key::String,val::Any)
@assert haskey(sim.param,key)
sim.param[key] = val
end
"""
### set_active(sim::SimParam, key::String)
Sets the key parameter to be active in sim.
"""
function set_active(sim::SimParam,key::String)
@assert haskey(sim.param,key)
sim.active[key] = true
end
"""
### set_active(sim::SimParam, keys::Vector{String})
Sets each of the key parameters to be active in sim.
"""
function set_active(sim::SimParam,keys::Vector{String})
for k in keys
set_active(sim,k)
end
end
"""
### set_inactive(sim::SimParam, key::String)
Sets the key parameter to be inactive in sim.
"""
function set_inactive(sim::SimParam,key::String)
@assert haskey(sim.param,key)
sim.active[key] = false
end
"""
### set_inactive(sim::SimParam, keys::Vector{String})
Sets each of the key parameters to be inactive in sim.
"""
function set_inactive(sim::SimParam,keys::Vector{String})
for k in keys
set_inactive(sim,k)
end
end
function is_active(sim::SimParam,key::String)
@assert haskey(sim.active,key)
sim.active[key]
end
import Base.get
function get(sim::SimParam, key::String, default_val::T) where T
val::T = get(sim.param,key,default_val)::T
return val
end
function get_any(sim::SimParam, key::String, default_val::Any)
val = get(sim.param,key,default_val)
return val
end
function get_real(sim::SimParam, key::String)
  val::Float64 = get(sim.param,key,NaN)::Float64
  @assert !isnan(val)  # NaN is the missing-key sentinel; note val != NaN is always true, so isnan is required here
  return val
end
function get_int(sim::SimParam, key::String)
val::Int64 = get(sim.param,key,zero(Int64))
#@assert(val!=nan(zero(Int64)))
#@assert(val!=oftype(x,NaN))
return val
end
function get_bool(sim::SimParam, key::String)
val::Bool = get(sim.param,key,false)
return val
end
function noop()
end
function get_function(sim::SimParam, key::String)
val::Function = Base.get(sim.param,key,noop)::Function
#val = Base.get(sim.param,key,null)
  @assert (val !== nothing) && (val !== noop)  # noop is the missing-key sentinel
return val::Function
end
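# Illustrative sketch (hypothetical key name): parameters can hold functions,
# letting the model be composed at runtime; get_function retrieves one and
# asserts the entry is a genuine Function rather than the noop sentinel.
function demo_function_valued_param()
  sim = SimParam(Dict{String,Any}("generate_x"=>rand))
  f = get_function(sim,"generate_x")
  return f()  # draws a uniform random number via the stored function
end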
import Base.haskey
haskey(sim::SimParam, key::String) = haskey(sim.param,key)
function make_vector_of_active_param_keys(sim::SimParam)
sortedkeys = sort(collect(keys(sim.param)))
sortedkeys_active = sortedkeys[map(k->get(sim.active,k,false),sortedkeys)]
return sortedkeys_active
end
function make_vector_of_sim_param(sim::SimParam)
param_vector = Float64[] # QUERY: Currently we make separate vectors of Ints and Floats. Does this make sense?
sss = sort(collect(keys(sim.param)))
for k in 1:length(sss)
if(sim.active[sss[k]]==false)
continue
end
    p = sim.param[sss[k]]
    if isa(p, Real)             # scalar parameter
      push!(param_vector, p)
    elseif eltype(p) <: Real    # array parameter of any length, flattened column-major
      append!(param_vector, vec(p))
    end                         # non-Real parameters (e.g., functions, strings) are not flattened
end
return param_vector
end
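# Illustrative check (hypothetical keys): flattening is by sorted key name;
# scalars contribute one entry and arrays are appended column-major via vec().
function demo_flatten_sim_param()
  sim = SimParam(Dict{String,Any}("a_scalar"=>0.5, "b_matrix"=>[1.0 2.0; 3.0 4.0]))
  set_active(sim,["a_scalar","b_matrix"])
  return make_vector_of_sim_param(sim)  # -> [0.5, 1.0, 3.0, 2.0, 4.0]
end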
function get_range_for_sim_param(key::String, sim::SimParam)
sorted_keys = sort(collect(keys(sim.param)))
i = 1
for k in 1:length(sorted_keys)
if(sim.active[sorted_keys[k]]==false)
continue
end
param_len = length(sim.param[sorted_keys[k]])
if sorted_keys[k]==key
      return i:(i+param_len-1)  # inclusive range covering this parameter's slice of the flattened vector
else
#println("Didn't match >",sorted_keys[k],"< and >",key,"<.")
i += param_len
end
end
println("# ERROR: Never found range for param: ",key)
return 0:0
end
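# Illustrative check (hypothetical keys): the returned range is the
# parameter's slice within the flattened active-parameter vector.
function demo_param_range()
  sim = SimParam(Dict{String,Any}("a"=>1.0, "b"=>[2.0,3.0], "c"=>4.0))
  set_active(sim,["a","b","c"])
  return get_range_for_sim_param("b",sim)  # -> 2:3, since "a" occupies index 1
end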
function update_sim_param_from_vector!(param::Vector{Float64}, sim::SimParam)
#println("# Input vector: ",param)
sorted_keys = sort(collect(keys(sim.param)))
i = 1
for k in 1:length(sorted_keys)
if(sim.active[sorted_keys[k]]==false)
continue
end
param_len = length(sim.param[sorted_keys[k]])
if param_len==1
if isa( sim.param[sorted_keys[k]], Real )
# println("# Replacing >",sorted_keys[k],"< with >",param[i],"<")
sim.param[sorted_keys[k]] = param[i]
i = i+1
elseif eltype( sim.param[sorted_keys[k]]) <: Real
# println("# Replacing >",sim.param[sorted_keys[k]],"< with >",reshape(param[i:i+param_len-1], size(sim.param[sorted_keys[k]]),"<")
sim.param[sorted_keys[k]] = reshape(param[i:i+param_len-1], size(sim.param[sorted_keys[k]]))
i = i+1
end
elseif param_len>1
if eltype( sim.param[sorted_keys[k]]) <: Real
# println("# Replacing >",sim.param[sorted_keys[k]],"< with >",reshape(param[i:i+param_len-1], size(sim.param[sorted_keys[k]])),"<")
sim.param[sorted_keys[k]] = reshape(param[i:i+param_len-1], size(sim.param[sorted_keys[k]]))
i = i+param_len
end
else
println("# Don't know what to do with empty simulation parameter: ",sorted_keys[k])
end
end
return sim
end
function preallocate_memory!(sim_param::SimParam)
num_kepler_targets = get_int(sim_param,"num_kepler_targets")
add_param_fixed(sim_param,"mem_kepler_target_obs", Array{KeplerTargetObs}(num_kepler_targets) )
end
function setup_sim_param_demo(args::Vector{String} = Array{String}(undef,0) ) # allow this to take a list of parameter (e.g., from command line)
sim_param = SimParam()
add_param_fixed(sim_param,"max_tranets_in_sys",7)
add_param_fixed(sim_param,"num_targets_sim_pass_one",190000) # Note this is used for the number of stars in the simulations, not necessarily related to number of Kepler targets
add_param_fixed(sim_param,"num_kepler_targets",190000) # Note this is used for the number of Kepler targets for the observational catalog
add_param_fixed(sim_param,"generate_star",ExoplanetsSysSim.generate_star_dumb)
#add_param_fixed(sim_param,"generate_planetary_system", ExoplanetsSysSim.generate_planetary_system_simple)
add_param_fixed(sim_param,"generate_planetary_system", ExoplanetsSysSim.generate_planetary_system_uncorrelated_incl)
# add_param_fixed(sim_param,"generate_kepler_target",ExoplanetsSysSim.generate_kepler_target_simple)
add_param_fixed(sim_param,"generate_kepler_target",ExoplanetsSysSim.generate_kepler_target_from_table)
add_param_fixed(sim_param,"star_table_setup",StellarTable.setup_star_table)
add_param_fixed(sim_param,"stellar_catalog","q1q17_dr25_gaia_fgk.jld2")
add_param_fixed(sim_param,"generate_num_planets",ExoplanetsSysSim.generate_num_planets_poisson)
add_param_active(sim_param,"log_eta_pl",log(2.0))
add_param_fixed(sim_param,"generate_planet_mass_from_radius",ExoplanetsSysSim.generate_planet_mass_from_radius_powerlaw)
add_param_fixed(sim_param,"vetting_efficiency",ExoplanetsSysSim.vetting_efficiency_none)
add_param_fixed(sim_param,"mr_power_index",2.0)
add_param_fixed(sim_param,"mr_const",1.0)
#add_param_fixed(sim_param,"generate_period_and_sizes",ExoplanetsSysSim.generate_period_and_sizes_log_normal)
#add_param_active(sim_param,"mean_log_planet_radius",log(2.0*earth_radius))
#add_param_active(sim_param,"sigma_log_planet_radius",log(2.0))
#add_param_active(sim_param,"mean_log_planet_period",log(5.0))
#add_param_active(sim_param,"sigma_log_planet_period",log(2.0))
add_param_fixed(sim_param,"generate_period_and_sizes", ExoplanetsSysSim.generate_period_and_sizes_power_law)
add_param_active(sim_param,"power_law_P",0.3)
add_param_active(sim_param,"power_law_r",-2.44)
add_param_fixed(sim_param,"min_period",1.0)
add_param_fixed(sim_param,"max_period",100.0)
add_param_fixed(sim_param,"min_radius",0.5*ExoplanetsSysSim.earth_radius)
add_param_fixed(sim_param,"max_radius",10.0*ExoplanetsSysSim.earth_radius)
add_param_fixed(sim_param,"generate_e_omega",ExoplanetsSysSim.generate_e_omega_rayleigh)
add_param_fixed(sim_param,"sigma_hk",0.03)
add_param_fixed(sim_param,"sigma_incl",2.0) # degrees
add_param_fixed(sim_param,"calc_target_obs_sky_ave",ExoplanetsSysSim.calc_target_obs_sky_ave)
add_param_fixed(sim_param,"calc_target_obs_single_obs",ExoplanetsSysSim.calc_target_obs_single_obs)
add_param_fixed(sim_param,"read_target_obs",ExoplanetsSysSim.simulated_read_kepler_observations)
add_param_fixed(sim_param,"transit_noise_model",ExoplanetsSysSim.transit_noise_model_fixed_noise)
# add_param_fixed(sim_param,"transit_noise_model",transit_noise_model_diagonal)
# add_param_fixed(sim_param,"rng_seed",1234) # If you want to be able to reproduce simulations
# Do other initialization tasks belong here or elsewhere?
# TODO OPT: Try to preallocate memory for each target to see if this makes a performance difference
# preallocate_memory!(sim_param)
return sim_param
end
function test_sim_param_constructors()
oldval = log(2.0)
sim_param = SimParam( Dict([ julia_version_pair, ("num_kepler_targets",190000), ("log_eta_pl",oldval), ("max_tranets_in_sys",7)] ) )
get(sim_param,"version_julia","")
set_active(sim_param,"log_eta_pl")
sp_vec = make_vector_of_sim_param(sim_param)
sp_vec .+= 0.1
update_sim_param_from_vector!(sp_vec,sim_param)
newval = get_real(sim_param,"log_eta_pl")
isapprox(oldval+0.1,newval,atol=0.001)
end
end
#test_sim_param_constructors()
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 2707 | ## ExoplanetsSysSim/src/star.jl
## (c) 2015 Eric B. Ford
#using Distributions
@compat abstract type StarAbstract end # TODO OPT: Check does using StarAbstract cause a significant performance hit
@compat abstract type SingleStarAbstract <: StarAbstract end
struct Star{LimbDarkeningT<:LimbDarkeningParamAbstract} <: SingleStarAbstract
radius::Float64
mass::Float64
flux::Float64 # relevant once have multiple stars in one target
ld::LimbDarkeningT
id::Int64 # id for looking up properties in stellar catalog
end
SingleStar = Star
#=
function SingleStar{LimbDarkeningT}(radius, mass, flux, ld::LimbDarkeningT, id::Int64) where {LimbDarkeningT<:LimbDarkeningParamAbstract}
end
=#
struct BinaryStar{LDT1<:LimbDarkeningParamAbstract,LDT2<:LimbDarkeningParamAbstract} <: StarAbstract
primary::Star{LDT1}
secondary::Star{LDT2}
orbit::Orbit
end
struct MultipleStar <: StarAbstract # Will we want to handle triple, quad systems?
component::Vector{StarAbstract}
orbit::Vector{Orbit}
end
flux(s::Star) = s.flux # Demo of how to specify function behavior that depends on the derived type
flux(s::BinaryStar) = s.primary.flux + s.secondary.flux
flux(s::MultipleStar) = sum( flux, s.component)
mass(s::Star) = s.mass
mass(s::BinaryStar) = s.primary.mass + s.secondary.mass
mass(s::MultipleStar) = sum( mass, s.component)::Float64
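# Illustrative check: flux and mass dispatch recurse through nested systems,
# so a binary is just the sum of its components. The Orbit constructor
# signature here is assumed from test_star_constructors below.
function demo_flux_dispatch()
  ld = LimbDarkeningParamQuadratic(0.4603,0.2291)
  s = Star(1.0,1.0,1.0,ld,0)
  b = BinaryStar(s,s,Orbit(10.0,0.0,0.0,0.0,0.0,0.0))
  return flux(b) == 2*flux(s)  # primary + secondary
end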
function generate_stars(sim_param::SimParam)
generate_star = get_function(sim_param,"generate_star")
num_target_stars = get_int(sim_param,"num_targets_sim_pass_one")
star_list = Array{StarAbstract}(undef,num_target_stars)
for i in 1:num_target_stars
s = generate_star(sim_param)
star_list[i] = s
#star_list[i] = generate_star(sim_param)
end
return star_list
end
function generate_star_dumb(sim_param::SimParam)
r = rand(Uniform(0.8,1.3))::Float64
m = rand(Normal(r,0.1))::Float64
while m<0.0
m = rand(Normal(r,0.1))::Float64
end
f = rand(Normal(1.0,0.1))::Float64
while f<0.0
f = 1.0+0.1*randn()
end
ld = LimbDarkeningParamQuadratic(0.4603,0.2291)
# ld = LimbDarkeningParam4thOrder(0.,0.,0.,0.) # Equivalent to uniform surface brightness for testing
return SingleStar(r,m,f,ld,0)
end
function test_star_constructors(sim_param::SimParam)
star_tmp = generate_star_dumb(sim_param)
f1 = flux(star_tmp)
f2 = flux(BinaryStar(star_tmp,star_tmp,Orbit(10.0,0.0,0.0,0.0,0.0,0.0)))
f4 = flux(MultipleStar([star_tmp for i in 1:4], [Orbit(10.0,0.0,0.0,0.0,0.0,0.0) for i in 1:4]) )
# println("# Fluxes: ", f1, " ", f2, " ", f4)
star_list = generate_stars(sim_param)
return true
end
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 4735 | ## ExoplanetsSysSim/src/stellar_table.jl
## (c) 2015 Eric B. Ford
module StellarTable
using ExoplanetsSysSim
#using DataArrays
using DataFrames
using CSV
#using JLD
using JLD2
using FileIO
#if VERSION >= v"0.5-"
# import Compat: UTF8String, ASCIIString
#end
export setup_star_table, star_table, num_usable_in_star_table, set_star_table, star_table_has_key
df = DataFrame()
function setup(sim_param::SimParam; force_reread::Bool = false)
global df
wf = WindowFunction.setup_window_function(sim_param)
WindowFunction.setup_OSD_interp(sim_param) #read in osd files so they can be interpolated
if haskey(sim_param,"read_stellar_catalog") && !force_reread
return df
#return data
end
stellar_catalog_filename = convert(String,joinpath(dirname(pathof(ExoplanetsSysSim)), "..","data", convert(String,get(sim_param,"stellar_catalog","q1_q17_dr24_stellar.csv")) ) )
df = setup(stellar_catalog_filename)
add_param_fixed(sim_param,"read_stellar_catalog",true)
add_param_fixed(sim_param,"num_kepler_targets",num_usable_in_star_table())
return df
end
function setup(filename::String; force_reread::Bool = false)
global df
if occursin(r".jld2$",filename) || occursin(r".jld$",filename)
#if occursin(r".jld$",filename)
try
data = load(filename)
df = data["stellar_catalog"]
Core.typeassert(df,DataFrame)
catch
error(string("# Failed to read stellar catalog >",filename,"< in jld2 format."))
end
else
try
#df = readtable(filename)
#df = CSV.read(filename,nullable=true)
df = CSV.read(filename, allowmissing=:all)
catch
error(string("# Failed to read stellar catalog >",filename,"< in ascii format."))
end
# See options at: http://exoplanetarchive.ipac.caltech.edu/docs/API_keplerstellar_columns.html
# Now we read in all CDPP's, so can interpolate to transit duration
symbols_to_keep = [ :kepid, :mass, :mass_err1, :mass_err2, :radius, :radius_err1, :radius_err2, :dens, :dens_err1, :dens_err2, :rrmscdpp01p5, :rrmscdpp02p0, :rrmscdpp02p5, :rrmscdpp03p0, :rrmscdpp03p5, :rrmscdpp04p5, :rrmscdpp05p0, :rrmscdpp06p0, :rrmscdpp07p5, :rrmscdpp09p0, :rrmscdpp10p5, :rrmscdpp12p0, :rrmscdpp12p5, :rrmscdpp15p0, :cdppslplong, :cdppslpshrt, :dataspan, :dutycycle, :limbdark_coeff1, :limbdark_coeff2, :limbdark_coeff3, :limbdark_coeff4 ]
    delete!(df, [!(x in symbols_to_keep) for x in names(df)]) # delete columns that we won't be using anyway
is_usable = [ !any(ismissing.([ df[i,j] for j in 1:size(df,2) ])) for i in 1:size(df,1) ]
    usable = findall(is_usable)
df = df[usable, symbols_to_keep]
end
df[!,:wf_id] = map(x->ExoplanetsSysSim.WindowFunction.get_window_function_id(x,use_default_for_unknown=false),df[!,:kepid])
obs_5q = df[!,:wf_id].!=-1
#df = df[obs_5q,keys(df.colindex)]
df = df[obs_5q,names(df)]
StellarTable.set_star_table(df)
return df
end
setup_star_table(sim_param::SimParam; force_reread::Bool = false) = setup(sim_param, force_reread=force_reread)
setup_star_table(filename::String) = setup(filename)
function num_usable_in_star_table()
global df
return size(df,1)
end
function star_table(i::Integer, sym::Symbol)
global df
return df[i,sym]
end
function star_table(i::Integer)
global df
return df[i,:]
end
function star_table(i::Integer, sym::Vector{Symbol})
global df
return df[i,sym]
end
function star_table(i::Vector{<:Integer}, sym::Symbol)
global df
return df[i,sym]
end
function star_table(i::Vector{<:Integer}, sym::Vector{Symbol})
global df
return df[i,sym]
end
function set_star_table(df2::DataFrame)
global df
df = df2
end
function star_table_has_key(s::Symbol)
global df
hasproperty(df,s)
end
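# Illustrative sketch (toy data; column names are hypothetical): the module
# keeps one global DataFrame, so set_star_table overwrites whatever catalog
# was loaded before.
function demo_star_table_roundtrip()
  set_star_table(DataFrame(kepid=[1,2], radius=[1.0,0.9]))
  return (num_usable_in_star_table(), star_table(2,:radius))  # -> (2, 0.9)
end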
end # module StellarTable
# using ExoplanetsSysSim.StellarTable
#=
function generate_star_from_table(sim_param::SimParam, id::Integer) # WARNING: To be renamed once there's a working/tested version that uses a stellar catalog with GAIA data
mu_r = StellarTable.star_table(id,:radius)
sig_r1 = StellarTable.star_table(id,:radius_err1)
sig_r2 = StellarTable.star_table(id,:radius_err2)
z = randn()
  r = mu_r + ((z>0) ? z*sig_r1 : z*sig_r2)  # parentheses required: + binds more tightly than the ternary operator
m = rand(Normal(r,0.1))::Float64
while m<0.0
m = rand(Normal(r,0.1))::Float64
end
f = rand(Normal(1.0,0.1))::Float64
while f<0.0
f = 1.0+0.1*randn()
end
ld = LimbDarkeningParam4thOrder(StellarTable.star_table(id,:limbdark_coeff1), StellarTable.star_table(id,:limbdark_coeff2), StellarTable.star_table(id,:limbdark_coeff3), StellarTable.star_table(id,:limbdark_coeff4) )
  return SingleStar(r,m,f,ld,id)  # field order: radius, mass, flux, ld, id
#return SingleStar(r,m,f,id)
end
=#
function generate_star_from_table(sim_param::SimParam)
id = rand(1:StellarTable.num_usable_in_star_table())
  generate_star_from_table(sim_param, id)  # NOTE: expects a two-argument method; only a commented-out draft of one appears above
end
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 9915 | # ExoplanetsSysSim/src/summary_statistics.jl
## (c) 2015 Eric B. Ford
using Statistics
mutable struct CatalogSummaryStatistics
stat::Dict{String,Any} # For storing summary statistics
cache::Dict{String,Any} # For caching data that's not a summary statistic
end
function CatalogSummaryStatistics() # Constructor for empty CatalogSummaryStatistics objects for creating globals to be used by closure
CatalogSummaryStatistics( Dict{String,Any}(), Dict{String,Any}() )
end
function calc_summary_stats_sim_pass_one_demo(cat_obs::KeplerObsCatalog, cat_phys::KeplerPhysicalCatalog, param::SimParam ) # Version for simulated data, since it includes cat_phys
ssd = Dict{String,Any}()
cache = Dict{String,Any}()
  max_tranets_in_sys = get_int(param,"max_tranets_in_sys") # Demo that simulation parameters can specify how to evaluate models, too
@assert max_tranets_in_sys >= 1
idx_tranets = findall(x::KeplerTargetObs-> length(x.obs) > 0, cat_obs.target)::Array{Int64,1} # Find indices of systems with at least 1 tranet = potentially detectable transiting planet
# Count total number of tranets and compile indices for N-tranet systems
num_tranets = 0
idx_n_tranets = Vector{Int64}[ Int64[] for m = 1:max_tranets_in_sys]
for n in 1:max_tranets_in_sys-1
idx_n_tranets[n] = findall(x::KeplerTargetObs-> length(x.obs) == n, cat_obs.target[idx_tranets] )
num_tranets += n*length(idx_n_tranets[n])
end
idx_n_tranets[max_tranets_in_sys] = findall(x::KeplerTargetObs-> length(x.obs) >= max_tranets_in_sys, cat_obs.target[idx_tranets] )
num_tranets += max_tranets_in_sys*length(idx_n_tranets[max_tranets_in_sys]) # WARNING: this means we need to ignore planets w/ indices > max_tranets_in_sys
  num_tranets = convert(Int64,num_tranets) # TODO OPT: Figure out why this isn't already an Int. I may be doing something that prevents some optimizations
cache["num_tranets"] = num_tranets
cache["idx_tranets"] = idx_tranets # We can save lists of indices to summary stats for pass 2, even though we won't use these for computing a distance or probability
#cache["idx_n_tranets"] = idx_n_tranets
expected_num_detect = 0.0
expected_num_sys_n_tranets = zeros(max_tranets_in_sys)
for i in idx_tranets
for j in 1:num_planets(cat_obs.target[i])
p_tr_and_det = prob_detect(cat_obs.target[i].prob_detect,j) # WARNING: Check why not using cat_phys here?
expected_num_detect += p_tr_and_det
end
for k in 1:max_tranets_in_sys
expected_num_sys_n_tranets[k] += prob_detect_n_planets(cat_obs.target[i].prob_detect,k) # WARNING: Check why not use cat_phys here?
end
end
ssd["expected planets detected"] = expected_num_detect
ssd["num_sys_tranets"] = expected_num_sys_n_tranets
ssd["num targets"] = get_int(param,"num_targets_sim_pass_one")
#println("expected planets = ",expected_num_detect,", num_sys_tranets = ",expected_num_sys_n_tranets,", num targets = ",ssd["num targets"])
# Arrays to store values for each tranet
period_list = zeros(num_tranets)
depth_list = zeros(num_tranets)
weight_list = zeros(num_tranets)
tr_id = 1 # tranet id
for i in idx_tranets # For each target with at least one tranet
targ = cat_obs.target[i]
for j in 1:min(length(targ.obs),max_tranets_in_sys) # For each tranet around that target (but truncated if too many tranets in one system)
#println("# i= ",i," j= ",j," tr_id= ",tr_id)
period_list[tr_id] = targ.obs[j].period
depth_list[tr_id] = targ.obs[j].depth
# (s,p) = targ.phys_id[j]
# ptr = calc_transit_prob_single(cat_phys.target[i],s,p) # WARNING: Could access physical catalog, rather than observed catalog, but obviously that's dangerous for observations.
weight_list[tr_id] = prob_detect(cat_obs.target[i].prob_detect,j)
tr_id += 1
end
end
ssd["P list"] = period_list # We can store whole lists, e.g., if we want to compute K-S distances
ssd["depth list"] = depth_list
ssd["weight list"] = weight_list
idx_good = Bool[ period_list[i]>0.0 && depth_list[i]>0.0 && weight_list[i]>0.0 for i in 1:length(period_list) ]
log_period_list = log10.(period_list[idx_good])
log_depth_list = log10.(depth_list[idx_good])
weight_list = weight_list[idx_good]
weight_sum = sum(weight_list)
ssd["mean log10 P"] = mean_log_P = sum( weight_list .* log_period_list) / weight_sum # TODO TEST: Check that these four weighted mean and stddevs are working properly
ssd["std log10 P"] = sum( weight_list .* (log_period_list.-mean_log_P).^2 ) / weight_sum
ssd["mean log10 depth"] = mean_log_depth = sum( weight_list .* log_depth_list) / weight_sum
ssd["std log10 depth"] = sum( weight_list .* (log_depth_list.-mean_log_depth).^2 ) / weight_sum
return CatalogSummaryStatistics(ssd, cache)
end
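# Illustrative sketch of the weighted moments computed above (a standalone
# helper, not used elsewhere in this file): mean and standard deviation
# weighted by detection probability.
function demo_weighted_moments(x::Vector{Float64}, w::Vector{Float64})
  wsum = sum(w)
  mu = sum(w .* x) / wsum
  sd = sqrt(sum(w .* (x .- mu).^2) / wsum)
  return (mu, sd)
end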
function calc_summary_stats_obs_demo(cat_obs::KeplerObsCatalog, param::SimParam ) # Version for observed data, thus no use of cat_phys
ssd = Dict{String,Any}()
cache = Dict{String,Any}()
  max_tranets_in_sys = get_int(param,"max_tranets_in_sys") # Demo that simulation parameters can specify how to evaluate models, too
idx_tranets = findall(x::KeplerTargetObs-> length(x.obs) > 0, cat_obs.target) # Find indices of systems with at least 1 tranet = potentially detectable transiting planet
# Count total number of tranets and compile indices for N-tranet systems
num_tranets = 0
idx_n_tranets = Vector{Int64}[ [] for m = 1:max_tranets_in_sys]
for n in 1:max_tranets_in_sys-1
idx_n_tranets[n] = findall(x::KeplerTargetObs-> length(x.obs) == n, cat_obs.target[idx_tranets] )
num_tranets += n*length(idx_n_tranets[n])
end
idx_n_tranets[max_tranets_in_sys] = findall(x::KeplerTargetObs-> length(x.obs) >= max_tranets_in_sys, cat_obs.target[idx_tranets] )
num_tranets += max_tranets_in_sys*length(idx_n_tranets[max_tranets_in_sys]) # WARNING: this means we need to ignore planets w/ indices > max_tranets_in_sys
if ( length( findall(x::KeplerTargetObs-> length(x.obs) > max_tranets_in_sys, cat_obs.target[idx_tranets] ) ) > 0) # Make sure max_tranets_in_sys is at least big enough for observed systems
warn("Observational data has more transiting planets in one systems than max_tranets_in_sys allows.")
end
  num_tranets = Int64(num_tranets) # TODO OPT: Figure out why this isn't already an Int. I may be doing something that prevents some optimizations
#println("# num_tranets= ",num_tranets)
  # QUERY: Is there any reason to cache anything for the real observations? We only need to do this once, so might as well use one pass for simplicity.
#cache["num_tranets"] = num_tranets
#cache["idx_tranets"] = idx_tranets # We can save lists of indices to summary stats for pass 2, even though we won't use these for computing a distance or probability
cache["idx_n_tranets"] = idx_n_tranets
ssd["planets detected"] = num_tranets # WARNING: Note that we'll comparing two different things for simulated and real data during pass 1 (expected planets detected)
num_sys_tranets = zeros(max_tranets_in_sys) # Since observed data, don't need to calculate probabilities.
for n in 1:max_tranets_in_sys # Make histogram of N-tranet systems
num_sys_tranets[n] = length(idx_n_tranets[n])
end
ssd["num_sys_tranets"] = num_sys_tranets
ssd["num targets"] = get_int(param,"num_kepler_targets")
# Arrays to store values for each tranet
period_list = zeros(num_tranets)
depth_list = zeros(num_tranets)
weight_list = ones(num_tranets)
i = 1 # tranet id
for targ in cat_obs.target[idx_tranets] # For each target with at least one tranet
for j in 1:min(length(targ.obs),max_tranets_in_sys) # For each tranet around that target (but truncated if too many tranets in one system)
#println("# i= ",i," j= ",j)
period_list[i] = targ.obs[j].period
depth_list[i] = targ.obs[j].depth
#weight_list[i] = 1.0
i = i+1
end
end
ssd["P list"] = period_list # We can store whole lists, e.g., if we want to compute K-S distances
ssd["depth list"] = depth_list
ssd["weight list"] = weight_list
idx_good = Bool[ period_list[i]>0.0 && depth_list[i]>0.0 for i in 1:length(period_list) ]
log_period_list = log10.(period_list[idx_good])
log_depth_list = log10.(depth_list[idx_good])
ssd["mean log10 P"] = mean_log_P = mean(log_period_list)
ssd["mean log10 depth"] = mean_log_depth = mean(log_depth_list)
ssd["std log10 P"] = stdm(log_period_list,mean_log_P)
ssd["std log10 depth"] = stdm(log_depth_list,mean_log_depth)
return CatalogSummaryStatistics(ssd, cache)
end
# Just returns the summary statistics passed in, but provides a demo/hook for computing more expensive summary statistics if a model is good enough to be worth the extra time.
function calc_summary_stats_sim_pass_two_demo(cat_obs::KeplerObsCatalog, cat_phys::KeplerPhysicalCatalog, ss::CatalogSummaryStatistics, param::SimParam )
return ss
end
function test_summary_statistics(cat_obs::KeplerObsCatalog, cat_phys::KeplerPhysicalCatalog, sim_param::SimParam)
ss = calc_summary_stats_sim_pass_one_demo(cat_obs,cat_phys,sim_param)
#println("len (ss pass 1)= ",length(collect(keys(ss1.stat))))
ss = calc_summary_stats_sim_pass_two_demo(cat_obs,cat_phys,ss,sim_param)
#println("len (ss pass 1)= ",length(collect(keys(ss1.stat))), "... len (ss pass 2)= ",length(collect(keys(ss2.stat))) )
ss = calc_summary_stats_obs_demo(cat_obs,sim_param) # So tests can compare to simulated observed catalog
return ss
end
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 7427 | ## ExoplanetsSysSim/src/target.jl
## (c) 2015 Eric B. Ford
#using Distributions
struct KeplerTarget
#sys::PlanetarySystem # Make array for planetary systems aroud multiple stars in one target?
sys::Vector{PlanetarySystemAbstract}
cdpp::Array{Float64,2} # fractional, not ppm; 2D to allow for multiple time scales, months, quarters or seasons/spacecraft rotation angles
# QUERY: Do we want a separate CDPP for SC? Or will CDPP's in different months be for LC/SC depending on this variable?
# QUERY: Should this be moved to KeplerTargetObs?
# QUERY: Should we not add this to target and use the star id to lookup CDPP from the stellar table?
contam::Float64 # QUERY: Do we want/need this, since we're able to generate multiple stars in a single target?
data_span::Float64
duty_cycle::Float64
window_function_id::Int64 # Points to the id of the window function for this target
#channel::Int64 # E.g., if we cared which Kepler channel the target fell on
#has_sc::Vector{Bool} # TODO OPT: Make Immutable Vector or BitArray for speed? QUERY: Should this go in KeplerTargetObs?
# # QUERY: Do we want a separate CDPP for SC? Or will CDPP's in different months be for LC/SC depending on this variable?
#ra::Float64 # E.g., if we cared about position on sky QUERY: Should we replace with galactic longitude and latitute?
#dec::Floa64 #
end
num_planets(t::KeplerTarget) = sum( num_planets, t.sys)
flux(t::KeplerTarget) = sum(flux,t.sys)+t.contam
star_table(t::KeplerTarget, sym::Symbol) = StellarTable.star_table(t.sys[1].star.id,sym)
function draw_asymmetric_normal(mu::Real, sig_plus::Real, sig_minus::Real; rn = randn() )
  @assert sig_plus >= zero(sig_plus)
  @assert sig_minus >= zero(sig_minus)
  mu + ( (rn>=zero(rn)) ? sig_plus*rn : sig_minus*rn )  # upper width above the mean, lower width below
end
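# Illustrative check: pinning rn shows that the upper width applies above the
# mean and the lower width below it.
demo_draw_asymmetric_normal() = ( draw_asymmetric_normal(1.0,0.2,0.1,rn=1.0), draw_asymmetric_normal(1.0,0.2,0.1,rn=-1.0) )  # -> (1.2, 0.9)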
function make_cdpp_array_empty(star_id::Integer)
cdpp_arr = Array{Float64,2}(undef,0,0)
end
function make_cdpp_array(star_id::Integer)
  star_table(id::Integer,sym::Symbol) = StellarTable.star_table(id,sym)::Float64
  # cdpp_durations (in hours) and the matching rrmscdpp column symbols (duration_symbols) are defined in constants.jl
  cdpp_arr = Float64[ 1.0e-6*sqrt(cdpp_durations[i]/24.0/LC_duration)*star_table(star_id,duration_symbols[i]) for i in 1:length(cdpp_durations) ]
end
function generate_kepler_target_from_table(sim_param::SimParam)
# generate_star = get_function(sim_param,"generate_star")
generate_planetary_system = get_function(sim_param,"generate_planetary_system")
max_draws_star_properties = 20
min_star_radius = 0.5
min_star_mass = 0.5
max_star_radius = 2.0
max_star_mass = 2.0
max_star_density = 1000.0
use_star_table_sigmas = true
min_frac_rad_sigma = 0.06
max_star_id = StellarTable.num_usable_in_star_table()
star_table(id::Integer,sym::Symbol) = StellarTable.star_table(id,sym)
@assert(1<=max_star_id)
star_id = rand(1:max_star_id)
mass = 0.0
dens = 0.0
radius = 0.0
#if use_star_table_sigmas
if get(sim_param,"use_star_table_sigmas",false)
attmpt_num = 0
while (!(min_star_radius<radius<max_star_radius)) || (!(min_star_mass<mass<max_star_mass))# || (!(0.0<dens<max_star_density))
if attmpt_num >= max_draws_star_properties
star_id = rand(1:max_star_id)
attmpt_num = 0
end
rad_errp = max(star_table(star_id,:radius_err1), min_frac_rad_sigma*star_table(star_id,:radius))
rad_errn = max(abs(star_table(star_id,:radius_err2)), min_frac_rad_sigma*star_table(star_id,:radius))
rn = randn()
radius = draw_asymmetric_normal( star_table(star_id,:radius), rad_errp, rad_errn, rn=rn)
mass = draw_asymmetric_normal( star_table(star_id,:mass), star_table(star_id,:mass_err1), abs(star_table(star_id,:mass_err2)), rn=rn )
#dens = draw_asymmetric_normal( star_table(star_id,:dens), star_table(star_id,:dens_err1), abs(star_table(star_id,:dens_err2)) )
attmpt_num += 1
end
# # ZAMS mass-radius relation taken from 15.1.1 of Allen's Astrophysical Quantities (2002)
# if radius > 1.227
# mass = 10^((log10(radius)-0.011)/0.64)
# else
# mass = 10^((log10(radius)+0.02)/0.917)
# end
dens = (mass*sun_mass_in_kg_IAU2010*1000.)/(4//3*pi*(radius*sun_radius_in_m_IAU2015*100.)^3) # Self-consistent density (gm/cm^3)
else
radius = star_table(star_id,:radius)
mass = star_table(star_id,:mass)
#dens = star_table(star_id,:dens)
dens = (mass*sun_mass_in_kg_IAU2010*1000.)/(4//3*pi*(radius*sun_radius_in_m_IAU2015*100.)^3) # Self-consistent density (gm/cm^3)
end
ld = LimbDarkeningParam4thOrder(star_table(star_id,:limbdark_coeff1), star_table(star_id,:limbdark_coeff2), star_table(star_id,:limbdark_coeff3), star_table(star_id,:limbdark_coeff4) )
star = SingleStar(radius,mass,1.0,ld,star_id) # TODO SCI: Allow for blends, binaries, etc.
#cdpp_arr = make_cdpp_array(star_id)
cdpp_arr = make_cdpp_array_empty(star_id) # Note: Now leaving this field empty out and looking up each time via interpolate_cdpp_to_duration_lookup_cdpp instead of interpolate_cdpp_to_duration_use_target_cdpp
contam = star_table(star_id, :contam)
data_span = star_table(star_id, :dataspan)
duty_cycle = star_table(star_id, :dutycycle)
if StellarTable.star_table_has_key(:wf_id)
wf_id = star_table(star_id,:wf_id)
else
wf_id = WindowFunction.get_window_function_id(star_table(star_id,:kepid))
end
# ch = rand(DiscreteUniform(1,84)) # Removed channel in favor of window function id
ps = generate_planetary_system(star, sim_param)
return KeplerTarget([ps],repeat(cdpp_arr, outer=[1,1]),contam,data_span,duty_cycle,wf_id)
end
function generate_kepler_target_simple(sim_param::SimParam)
generate_star = get_function(sim_param,"generate_star")
generate_planetary_system = get_function(sim_param,"generate_planetary_system")
star::StarAbstract = generate_star(sim_param)
  mean_log_cdpp = 4.9759601617565465 # mean from star table
stddev_log_cdpp = 0.6704860437536709 # std dev from star table
rrmscdpp_5hr = exp(mean_log_cdpp+stddev_log_cdpp*randn())
cdpp_5hr = 1.0e-6 * rrmscdpp_5hr * sqrt(5.0/24.0 / LC_duration )
contam = 0.0 # rand(LogNormal(1.0e-3,1.0)) # TODO SCI: Come up with better description of Kepler targets, maybe draw from real contaminations
wf_id = 0
# ch = rand(DiscreteUniform(1,84)) # Removed channel in favor of window function id
ps = generate_planetary_system(star, sim_param)
  return KeplerTarget(PlanetarySystemAbstract[ps],fill(cdpp_5hr,num_cdpp_timescales,num_quarters),contam,mission_data_span,mission_duty_cycle,wf_id)
end
function test_target(sim_param::SimParam)
generate_kepler_target_simple(sim_param)
StellarTable.setup_star_table(sim_param)
generate_kepler_target_from_table(sim_param)
end
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | code | 38152 | ## ExoplanetsSysSim/src/transit_detection_model.jl
## (c) 2015 Eric B. Ford
# Several functions below based on https://github.com/christopherburke/KeplerPORTs/blob/master/KeplerPORTs_utils.py
# That follows the procedure outlined in Burke et al.(2015).
# However we don't currently interpolate the mesthreshold to the relevant duration
import SpecialFunctions.lgamma
function real_log_choose(m::Float64, n::Float64)::Float64
lgamma(m+1)-lgamma(n+1)-lgamma(m-n+1.0)
end
function real_binom(k::Float64, BigM::Float64, f::Float64)::Float64
F1 = real_log_choose(BigM,k)
F2 = k*log(f)
F3 = (BigM-k)*log(1.0-f)
x = exp(F1+F2+F3)
return x
end
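# Illustrative check: for integer arguments real_binom reduces to the usual
# binomial pmf.
demo_real_binom() = real_binom(2.0, 5.0, 0.5)  # ≈ binomial(5,2)*0.5^5 = 0.3125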
"""
kepler_window_function_binomial_model(exp_num_transits_no_gaps, duty_cycle; min_transits = 3.0)
Binomial window function model for use with Kepler taken from Burke et al. (2015).
# Arguments:
- `exp_num_transits_no_gaps::Float64`: Expected number of transits assuming no gaps in the observation
- `duty_cycle::Float64`: Kepler duty cycle (i.e. fraction of observation time with usable data)
- `min_transits::Float64 = 3.0`: Minimum number of observed transits required for consideration as planet candidate
# Returns:
Probability of detecting at least min_transits given the provided properties of the Kepler target star and planet.
"""
function kepler_window_function_binomial_model(exp_num_transits_no_gaps::Float64, duty_cycle::Float64; min_transits::Float64 = 3.0)::Float64
if exp_num_transits_no_gaps < min_transits
return 0.0
else
return max(1.0 - real_binom(min_transits,exp_num_transits_no_gaps,duty_cycle), 0.0)
end
end
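# Illustrative check (hypothetical values): the window function vanishes below
# min_transits and saturates near 1 once many transits are expected.
demo_window_function() = ( kepler_window_function_binomial_model(2.0, 0.9), kepler_window_function_binomial_model(40.0, 0.9) )  # -> (0.0, ≈1.0)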
"""
kepler_window_function_binomial_model(t, exp_num_transits_no_gaps, period, duration; min_transits = 3.0)
Binomial window function model for use with Kepler taken from Burke et al. (2015).
# Arguments:
- `t::KeplerTarget`: Kepler target object
- `exp_num_transits_no_gaps::Float64`: Expected number of transits assuming no gaps in the observation
- `period::Float64`: Orbital period of planet (in days)
- `duration::Float64`: Transit duration of planet (in days)
- `min_transits::Float64 = 3.0`: Minimum number of observed transits required for consideration as planet candidate
# Returns:
Probability of detecting at least min_transits given the provided properties of the Kepler target star and planet.
"""
function kepler_window_function_binomial_model(t::KeplerTarget, exp_num_transits_no_gaps::Float64, period::Float64, duration::Float64; min_transits::Float64 = 3.0)::Float64
kepler_window_function_binomial_model(exp_num_transits_no_gaps, t.duty_cycle, min_transits=min_transits)
end
"""
kepler_window_function_dr25_model(t, exp_num_transits_no_gaps, period, duration)
Window function model for use with Kepler that were fit per target for DR25 (see KSCI-19101-002).
# Arguments:
- `t::KeplerTarget`: Kepler target object
- `exp_num_transits_no_gaps::Float64`: Expected number of transits assuming no gaps in the observation
- `period::Float64`: Orbital period of planet (in days)
- `duration::Float64`: Transit duration of planet (in days)
# Returns:
Probability of detecting at least 3 transits given the provided properties of the Kepler target star and planet.
"""
function kepler_window_function_dr25_model(t::KeplerTarget, exp_num_transits_no_gaps::Float64, period::Float64, duration::Float64)::Float64
ExoplanetsSysSim.WindowFunction.eval_window_function(t.window_function_id, Duration=duration, Period=period)
end
# WARNING: Hard-coded function variable for computational efficiency,
# replace with appropriate window function model.
#kepler_window_function = kepler_window_function_binomial_model
kepler_window_function = kepler_window_function_dr25_model
"""
frac_depth_to_tps_depth(frac_depth)
Converts fractional transit depth to depth used by Kepler Transiting Planet Search (TPS) module. (See Christiansen et al. (2015))
NOTE: This assumes a linear limbdarkening coefficient of 0.6
# Arguments:
- `frac_depth::Float64`: Fractional transit depth of planet at transit center.
# Returns:
Kepler TPS depth.
"""
function frac_depth_to_tps_depth(frac_depth::Float64)
alp = 1.0874
bet = 1.0187
REALDEPTH2TPSSQUARE = 1.0 # WARNING: Waiting for this to be confirmed
k = sqrt(frac_depth)
  tps_depth = min( (alp-bet*k) * frac_depth* REALDEPTH2TPSSQUARE, 1.0) # NOTE: the min caps the TPS depth at 1.0 (a full occultation) based on common sense
return tps_depth::Float64
end
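# Illustrative check: a 100 ppm fractional depth maps to a slightly deeper TPS
# depth, because (alp - bet*k) > 1 for small radius ratios k.
demo_tps_depth() = frac_depth_to_tps_depth(100.0e-6)  # ≈ 1.077e-4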
"""
detection_efficiency_theory(mes, expected_num_transits; min_pdet_nonzero = 0.0)
Detection efficiency model assuming perfect theoretical 7.1-sigma error function response.
# Arguments:
- `mes::Float64`: Estimated multiple event statistic (or signal-to-noise ratio) for planet
- `expected_num_transits::Float64`: Expected number of planet transits (assuming no observation gaps)
- `min_pdet_nonzero::Float64 = 0.0`: Minimum probability of detection (if transiting) to be treated as identifiable
# Results:
Probability of detection (if transiting) for planet
"""
function detection_efficiency_theory(mes::Float64, expected_num_transits::Float64; min_pdet_nonzero::Float64 = 0.0)
muoffset = 0.0
sig = 1.0
mesthresh = 7.1
mes *= 1.003
  if mes > (9.0 + mesthresh + muoffset)  # so far above threshold that the erf below saturates at 1
return 1.0
else
pdet = 0.5 + 0.5*erf((mes - mesthresh - muoffset) / sqrt(2.0*sig*sig))
pdet = pdet >= min_pdet_nonzero ? pdet : 0.0
return pdet
end
end
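# Illustrative check (hypothetical values): at the 7.1σ threshold the
# theoretical efficiency is ≈ 0.5 (the 1.003 MES calibration factor shifts it
# slightly upward).
demo_detection_efficiency_theory() = detection_efficiency_theory(7.1, 10.0)  # ≈ 0.51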
"""
detection_efficiency_fressin2013(mes, expected_num_transits)
Detection efficiency model using the linear ramp from Fressin (2013).
# Arguments:
- `mes::Float64`: Estimated multiple event statistic (or signal-to-noise ratio) for planet
- `expected_num_transits::Float64`: Expected number of planet transits (assuming no observation gaps)
# Results:
Probability of detection (if transiting) for planet
"""
function detection_efficiency_fressin2013(mes::Float64, expected_num_transits::Float64)
mesmin = 6.0
mesmax = 16.0
if mes <= mesmin
return 0.0
elseif mes >= mesmax
return 1.0
else
return (mes - mesmin) / (mesmax - mesmin)
end
end
"""
detection_efficiency_christiansen2015(mes, expected_num_transits; mes_threshold = 7.1, min_pdet_nonzero = 0.0)
Detection efficiency model using a Gamma function taken from Christiansen (2015).
# Arguments:
- `mes::Float64`: Estimated multiple event statistic (or signal-to-noise ratio) for planet
- `expected_num_transits::Float64`: Expected number of planet transits (assuming no observation gaps)
- `mes_threshold::Float64 = 7.1`: Minimum multiple event statistic value for signal to have been considered a potential planet candidate signal
- `min_pdet_nonzero::Float64 = 0.0`: Minimum probability of detection (if transiting) to be treated as identifiable
# Results:
Probability of detection (if transiting) for planet
"""
function detection_efficiency_christiansen2015(mes::Float64, expected_num_transits::Float64; mes_threshold::Float64 = 7.1, min_pdet_nonzero::Float64 = 0.0)
a = 4.65 # from code for detection_efficiency(...) at https://github.com/christopherburke/KeplerPORTs/blob/master/KeplerPORTs_utils.py
# b = 1.05
# a = 4.35 # from arxiv abstract. Informal testing showed it didn't matter
b = 0.98
mes *= 1.003
usemes = max(0.0,mes - 7.1 - (mes_threshold - 7.1))
pdet = cdf(Gamma(a,b), usemes)
pdet = pdet >= min_pdet_nonzero ? pdet : 0.0
return pdet
end
"""
detection_efficiency_dr25_simple(mes, expected_num_transits; min_pdet_nonzero = 0.0)
Detection efficiency model using a Gamma function fit over a FGK sample of DR25 targets, taken from KSCI-19110-001.
# Arguments:
- `mes::Float64`: Estimated multiple event statistic (or signal-to-noise ratio) for planet
- `expected_num_transits::Float64`: Expected number of planet transits (assuming no observation gaps)
- `min_pdet_nonzero::Float64 = 0.0`: Minimum probability of detection (if transiting) to be treated as identifiable
# Results:
Probability of detection (if transiting) for planet
"""
function detection_efficiency_dr25_simple(mes::Float64, expected_num_transits::Float64; min_pdet_nonzero::Float64 = 0.0)::Float64
a = 30.87 # from pg 16 of https://exoplanetarchive.ipac.caltech.edu/docs/KSCI-19110-001.pdf
b = 0.271
c = 0.940
mes *= 1.003
dist = Gamma(a,b)
pdet::Float64 = c*cdf(dist, mes)::Float64
pdet = pdet >= min_pdet_nonzero ? pdet : 0.0
return pdet
end
"""
get_param_for_detection_and_vetting_efficiency_depending_on_num_transits(num_tr)
Determine appropriate parameters to use in Gamma function model for detection efficiency and vetting of Kepler planet candidates. Fit using the simulated transit injection tests on Kepler DR25 targets.
# Arguments:
- `num_tr::Integer`: (Expected) number of observed transits for planet
# Returns:
alpha, beta, and C, the three parameters to be used for the Gamma detection efficiency function
"""
function get_param_for_detection_and_vetting_efficiency_depending_on_num_transits(num_tr::Integer)
if num_tr <= 3
return (33.3884, 0.264472, 0.699093)
elseif num_tr <= 4
return (32.886, 0.269577, 0.768366)
elseif num_tr <= 5
return (31.5196, 0.282741, 0.833673)
elseif num_tr <= 6
return (30.9919, 0.286979, 0.859865)
elseif num_tr <= 9
return (30.1906, 0.294688, 0.875042)
elseif num_tr <= 18
return (31.6342, 0.279425, 0.886144)
elseif num_tr <= 36
return (32.6448, 0.268898, 0.889724)
else
return (27.8185, 0.32432, 0.945075)
end
end
"""
detection_and_vetting_efficiency_model_v1(mes, expected_num_transits; min_pdet_nonzero = 0.0)
Detection efficiency model using a Gamma function whose parameters were determined using a fit over the simulated transit injection tests of a sample of FGK DR25 targets (see Hsu et al. (2019)).
# WARNING: Combined detection and vetting efficiency model - do NOT include additional vetting efficiency
# Arguments:
- `mes::Float64`: Estimated multiple event statistic (or signal-to-noise ratio) for planet
- `expected_num_transits::Float64`: Expected number of planet transits (assuming no observation gaps)
- `min_pdet_nonzero::Float64 = 0.0`: Minimum probability of detection (if transiting) to be treated as identifiable
# Returns:
Probability of detection (if transiting) for planet
"""
function detection_and_vetting_efficiency_model_v1(mes::Float64, expected_num_transits::Float64; min_pdet_nonzero::Float64 = 0.0)::Float64
mes *= 1.003
num_transit_int = convert(Int64,floor(expected_num_transits))
num_transit_int += rand() < expected_num_transits-num_transit_int ? 1 : 0
a, b, c = get_param_for_detection_and_vetting_efficiency_depending_on_num_transits(num_transit_int)
dist = Gamma(a,b)
pdet::Float64 = c*cdf(dist, mes)::Float64
pdet = pdet >= min_pdet_nonzero ? pdet : 0.0
return pdet
end
# WARNING: Hardcoded choice of transit detection efficiency here for speed and so as to not have it hardcoded in multiple places
#detection_efficiency_model = detection_efficiency_christiansen2015
# detection_efficiency_model = detection_efficiency_dr25_simple
detection_efficiency_model = detection_and_vetting_efficiency_model_v1
"""
vetting_efficiency_none(R_p, P)
Vetting efficiency model assuming all identified planet candidate signals are true positives
# Arguments:
- `R_p::Real`: Planet radius
- `P::Real`: Planet orbital period
# Returns:
1
"""
function vetting_efficiency_none(R_p::Real, P::Real)
return 1.0
end
"""
vetting_efficiency_dr25_mulders(R_p, P)
Vetting efficiency model from Mulders et al. (2018) using parameters fit for Gaia DR2 stellar properties applied to Kepler DR25 targets independent of reliability score (through e-mail communication).
# Arguments:
- `R_p::Real`: Planet radius (in R_sol)
- `P::Real`: Planet orbital period (in days)
# Returns:
Probability of planet with given properties to be vetted as a planet candidate.
"""
function vetting_efficiency_dr25_mulders(R_p::Real, P::Real)
c = 0.93
a_R = -0.03
P_break = 205.
a_P = 0.00
b_P = -0.24
pvet = c*(R_p/earth_radius)^a_R
if P < P_break
pvet *= (P/P_break)^a_P
else
pvet *= (P/P_break)^b_P
end
return pvet
end
"""
vetting_efficiency_dr25_mulders_score_cut(R_p, P)
Vetting efficiency model from Mulders et al. (2018) using parameters fit for Gaia DR2 stellar properties applied to Kepler DR25 targets selected via reliability score (arXiv 1805.08211).
# Arguments:
- `R_p::Real`: Planet radius (in R_sol)
- `P::Real`: Planet orbital period (in days)
# Returns:
Probability of planet with given properties to be vetted as a planet candidate.
"""
function vetting_efficiency_dr25_mulders_score_cut(R_p::Real, P::Real)
c = 0.63
a_R = 0.19
P_break = 53.
a_P = -0.07
b_P = -0.39
pvet = c*(R_p/earth_radius)^a_R
if P < P_break
pvet *= (P/P_break)^a_P
else
pvet *= (P/P_break)^b_P
end
return pvet
end
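# Illustrative check: at the break period the broken power law reduces to the
# radius term alone.
demo_vetting_score_cut() = vetting_efficiency_dr25_mulders_score_cut(2.0*earth_radius, 53.0)  # ≈ 0.63*2^0.19 ≈ 0.72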
# WARNING: Hardcoded choice of planet vetting efficiency here for speed and so as to not have it hardcoded in multiple places
vetting_efficiency = vetting_efficiency_none
"""
interpolate_cdpp_to_duration_use_target_cdpp(t, duration)
Linearly interpolated CDPP using list of provided CDPP at different transit durations stored in Kepler target object (DEPRECATED).
# Arguments:
- `t::KeplerTarget`: Kepler target object
- `duration::Float64`: Transit duration (in days)
# Results:
Estimated CDPP at given transit duration for Kepler target.
"""
function interpolate_cdpp_to_duration_use_target_cdpp(t::KeplerTarget, duration::Float64)::Float64
duration_in_hours = duration *24.0
dur_idx = searchsortedlast(cdpp_durations,duration_in_hours) # cdpp_durations is defined in constants.jl
if dur_idx <= 0
cdpp = t.cdpp[1,1]
  elseif dur_idx==length(cdpp_durations) && (duration_in_hours >= cdpp_durations[end]) # There are 14 cdpp_durations.
cdpp = t.cdpp[length(cdpp_durations),1]
else
w = ((duration_in_hours)-cdpp_durations[dur_idx]) / (cdpp_durations[dur_idx+1]-cdpp_durations[dur_idx])
cdpp = w*t.cdpp[dur_idx+1,1] + (1-w)*t.cdpp[dur_idx,1]
end
return cdpp
end
"""
interpolate_cdpp_to_duration_lookup_cdpp(t, duration)
Linearly interpolated CDPP using list of provided CDPP at different transit durations stored in stellar catalog dataframe.
# Arguments:
- `t::KeplerTarget`: Kepler target object
- `duration::Float64`: Transit duration (in days)
# Results:
Estimated CDPP at given transit duration for Kepler target.
"""
function interpolate_cdpp_to_duration_lookup_cdpp(t::KeplerTarget, duration::Float64)::Float64
duration_in_hours = duration *24.0
dur_idx = searchsortedlast(cdpp_durations,duration_in_hours) # cdpp_durations is defined in constants.jl
get_cdpp(i::Integer) = 1.0e-6*sqrt(cdpp_durations[i]/24.0/LC_duration)*star_table(t,duration_symbols[i])
if dur_idx <= 0
cdpp = get_cdpp(1)
  elseif dur_idx==length(cdpp_durations) && (duration_in_hours >= cdpp_durations[end]) # There are 14 cdpp_durations.
cdpp = get_cdpp(length(cdpp_durations))
else
w = ((duration_in_hours)-cdpp_durations[dur_idx]) / (cdpp_durations[dur_idx+1]-cdpp_durations[dur_idx])
cdpp = w*get_cdpp(dur_idx+1) + (1-w)*get_cdpp(dur_idx)
end
return cdpp
end
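# Illustrative sketch of the same linear-interpolation rule on plain arrays
# (a standalone helper, independent of the stellar table):
function demo_interp_1d(xs::Vector{Float64}, ys::Vector{Float64}, x::Float64)
  i = searchsortedlast(xs, x)
  i <= 0 && return ys[1]             # clamp below the grid
  i >= length(xs) && return ys[end]  # clamp at/above the top of the grid
  w = (x - xs[i]) / (xs[i+1] - xs[i])
  return w*ys[i+1] + (1-w)*ys[i]
end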
#interpolate_cdpp_to_duration = interpolate_cdpp_to_duration_use_target_cdpp
interpolate_cdpp_to_duration = interpolate_cdpp_to_duration_lookup_cdpp
"""
calc_snr_if_transit_cdpp(t, depth, duration, cdpp, sim_param; num_transit = 1)
Calculate the expected multiple event statistic (signal-to-noise ratio) for planet around Kepler target star using CDPP.
# Arguments:
- `t::KeplerTarget`: Kepler target object
- `depth::Float64`: (Fractional) transit depth
- `duration::Float64`: Transit duration (in days)
- `cdpp::Float64`: CDPP for target star given transit duration
- `sim_param::SimParam`: Simulation parameters
- `num_transit::Float64 = 1`: Expected number of transits observed
# Returns:
(Expected) multiple event statistic (signal-to-noise ratio)
"""
function calc_snr_if_transit_cdpp(t::KeplerTarget, depth::Float64, duration::Float64, cdpp::Float64, sim_param::SimParam; num_transit::Float64 = 1)
# depth_tps = frac_depth_to_tps_depth(depth) # TODO SCI: WARNING: Hardcoded this conversion. Remove once depth calculated using limb darkening model
# snr = depth_tps*sqrt(num_transit*duration*LC_rate)/cdpp # WARNING: Assumes measurement uncertainties are uncorrelated & CDPP based on LC
snr = depth*sqrt(num_transit*duration*LC_rate)/cdpp # WARNING: Assumes measurement uncertainties are uncorrelated & CDPP based on LC
end
"""
calc_snr_if_transit(t, depth, duration, osd, sim_param; num_transit = 1)
Calculate the expected multiple event statistic (signal-to-noise ratio) for planet around Kepler target star using 1-sigma depth function (OSD).
# NOTE: Assumes OSD functions have already been read in for all relevant Kepler targets.
# Arguments:
- `t::KeplerTarget`: Kepler target object
- `depth::Real`: (Fractional) transit depth
- `duration::Real`: Transit duration (in days)
- `osd::Real`: OSD for target star given transit duration and period
- `sim_param::SimParam`: Simulation parameters
- `num_transit::Real = 1`: Expected number of transits observed
# Returns:
(Expected) multiple event statistic (signal-to-noise ratio)
"""
function calc_snr_if_transit(t::KeplerTarget, depth::Real, duration::Real, osd::Real, sim_param::SimParam; num_transit::Real = 1)
# depth_tps = frac_depth_to_tps_depth(depth) # WARNING: Hardcoded this conversion
# snr = depth_tps/osd*1.0e6 # osd is in ppm
snr = depth/osd*1.0e6 # osd is in ppm
end
"""
calc_snr_if_transit_central(t, s, p, sim_param)
Calculate the expected multiple event statistic (signal-to-noise ratio) for planet around Kepler target star using 1-sigma depth function (OSD) at transit center.
# NOTE: Assumes OSD functions have already been read in for all relevant Kepler targets.
# Arguments:
- `t::KeplerTarget`: Kepler target object
- `s::Integer`: Star index within Kepler target
- `p::Integer`: Planet index within Kepler target
- `sim_param::SimParam`: Simulation parameters
# Returns:
(Expected) multiple event statistic (signal-to-noise ratio) at transit center
"""
function calc_snr_if_transit_central(t::KeplerTarget, s::Integer, p::Integer, sim_param::SimParam)
period = t.sys[s].orbit[p].P
depth = calc_transit_depth(t,s,p)
duration_central = calc_transit_duration_eff_central(t,s,p)
kepid = StellarTable.star_table(t.sys[s].star.id, :kepid)
osd_duration_central = get_durations_searched_Kepler(period,duration_central) #tests if durations are included in Kepler's observations for a certain planet period. If not, returns nearest possible duration
osd_central = WindowFunction.interp_OSD_from_table(kepid, period, osd_duration_central)
if osd_duration_central > duration_central #use a correcting factor if this duration is lower than the minimum searched for this period.
osd_central = osd_central*osd_duration_central/duration_central
end
num_transit = calc_expected_num_transits(t,s,p,sim_param)
calc_snr_if_transit(t,depth,duration_central,osd_central, sim_param,num_transit=num_transit)
end
"""
calc_snr_if_transit_central_cdpp(t, s, p, sim_param)
Calculate the expected multiple event statistic (signal-to-noise ratio) for planet around Kepler target star using CDPP at transit center.
# Arguments:
- `t::KeplerTarget`: Kepler target object
- `s::Integer`: Star index within Kepler target
- `p::Integer`: Planet index within Kepler target
- `sim_param::SimParam`: Simulation parameters
# Returns:
(Expected) multiple event statistic (signal-to-noise ratio) at transit center
"""
function calc_snr_if_transit_central_cdpp(t::KeplerTarget, s::Integer, p::Integer, sim_param::SimParam)
depth = calc_transit_depth(t,s,p)
duration_central = calc_transit_duration_eff_central(t,s,p)
cdpp = interpolate_cdpp_to_duration(t, duration_central)
num_transit = calc_expected_num_transits(t,s,p,sim_param)
calc_snr_if_transit_cdpp(t,depth,duration_central,cdpp, sim_param,num_transit=num_transit)
end
"""
calc_prob_detect_if_transit(t, snr, period, duration, sim_param; num_transit = 1)
calc_prob_detect_if_transit(t, depth, period, duration, osd, sim_param; num_transit = 1)
Calculate probability of detecting planet signal (if planet transits) using 1-sigma depth (OSD) function.
# NOTE: Assumes OSD functions already read in.
# Arguments:
- `t::KeplerTarget`: Kepler target object
- `snr::Float64`: Expected multiple event statistic (signal-to-noise ratio)
- `depth::Float64`: (Fractional) transit depth
- `period::Float64`: Orbital period (in days)
- `duration::Float64`: Transit duration (in days)
- `osd::Float64`: OSD for target star given transit duration and period
- `sim_param::SimParam`: Simulation parameters
- `num_transit::Float64 = 1`: Expected number of transits observed
# Returns:
Probability of detecting planet (if it transits)
"""
function calc_prob_detect_if_transit(t::KeplerTarget, snr::Float64, period::Float64, duration::Float64, sim_param::SimParam; num_transit::Float64 = 1)
min_pdet_nonzero = 1.0e-4 # TODO OPT: Consider raising threshold to prevent a plethora of planets that are very unlikely to be detected due to using 0.0 or other small value here
wf = kepler_window_function(t, num_transit, period, duration)
return wf*detection_efficiency_model(snr, num_transit, min_pdet_nonzero=min_pdet_nonzero)
end
function calc_prob_detect_if_transit(t::KeplerTarget, depth::Float64, period::Float64, duration::Float64, osd::Float64, sim_param::SimParam; num_transit::Float64 = 1)
snr = calc_snr_if_transit(t,depth,duration,osd, sim_param, num_transit=num_transit)
return calc_prob_detect_if_transit(t, snr, period, duration, sim_param, num_transit=num_transit)
end
"""
calc_prob_detect_if_transit_cdpp(t, depth, period, duration, cdpp, sim_param; num_transit = 1)
Calculate probability of detecting planet signal (if planet transits) using CDPP.
# Arguments:
- `t::KeplerTarget`: Kepler target object
- `depth::Float64`: (Fractional) transit depth
- `period::Float64`: Orbital period (in days)
- `duration::Float64`: Transit duration (in days)
- `cdpp::Float64`: CDPP for target star given transit duration
- `sim_param::SimParam`: Simulation parameters
- `num_transit::Float64 = 1`: Expected number of transits observed
# Returns:
Probability of detecting planet (if it transits)
"""
function calc_prob_detect_if_transit_cdpp(t::KeplerTarget, depth::Float64, period::Float64, duration::Float64, cdpp::Float64, sim_param::SimParam; num_transit::Float64 = 1)
snr = calc_snr_if_transit_cdpp(t,depth,duration,cdpp, sim_param, num_transit=num_transit)
return calc_prob_detect_if_transit(t, snr, period, duration, sim_param, num_transit=num_transit)
end
"""
calc_prob_detect_if_transit_central(t, s, p, sim_param)
Calculate probability of detecting planet signal (if planet transits) at transit center using 1-sigma depth (OSD) functions.
# NOTE: Assumes OSD functions already read in.
# Arguments:
- `t::KeplerTarget`: Kepler target object
- `s::Integer`: Star index within Kepler target
- `p::Integer`: Planet index within Kepler target
- `sim_param::SimParam`: Simulation parameters
# Returns:
Probability of detecting planet (if it transits) at transit center
"""
function calc_prob_detect_if_transit_central(t::KeplerTarget, s::Integer, p::Integer, sim_param::SimParam)
period = t.sys[s].orbit[p].P
depth = calc_transit_depth(t,s,p)
duration_central = calc_transit_duration_eff_central(t,s,p)
kepid = StellarTable.star_table(t.sys[s].star.id, :kepid)
osd_duration_central = get_durations_searched_Kepler(period,duration_central) #tests if durations are included in Kepler's observations for a certain planet period. If not, returns nearest possible duration
osd_central = WindowFunction.interp_OSD_from_table(kepid, period, osd_duration_central)
if osd_duration_central > duration_central #use a correcting factor if this duration is lower than the minimum searched for this period.
osd_central = osd_central*osd_duration_central/duration_central
end
ntr = calc_expected_num_transits(t,s,p,sim_param)
calc_prob_detect_if_transit(t,depth,period,duration_central,osd_central, sim_param, num_transit=ntr)
end
"""
calc_prob_detect_if_transit_central_cdpp(t, s, p, sim_param)
Calculate probability of detecting planet signal (if planet transits) at transit center using CDPP
# Arguments:
- `t::KeplerTarget`: Kepler target object
- `s::Integer`: Star index within Kepler target
- `p::Integer`: Planet index within Kepler target
- `sim_param::SimParam`: Simulation parameters
# Returns:
Probability of detecting planet (if it transits) at transit center
"""
function calc_prob_detect_if_transit_central_cdpp(t::KeplerTarget, s::Integer, p::Integer, sim_param::SimParam)
period = t.sys[s].orbit[p].P
depth = calc_transit_depth(t,s,p)
duration_central = calc_transit_duration_eff_central(t,s,p)
cdpp = interpolate_cdpp_to_duration(t, duration_central)
ntr = calc_expected_num_transits(t,s,p,sim_param)
calc_prob_detect_if_transit_cdpp(t,depth,period,duration_central,cdpp, sim_param, num_transit=ntr)
end
"""
calc_prob_detect_if_transit_with_actual_b(t, s, p, sim_param)
Calculate probability of detecting planet signal (if planet transits) at transit center using 1-sigma depth (OSD) functions and the impact parameter of the provided orbit.
# NOTE: Assumes OSD functions already read in.
# Arguments:
- `t::KeplerTarget`: Kepler target object
- `s::Integer`: Star index within Kepler target
- `p::Integer`: Planet index within Kepler target
- `sim_param::SimParam`: Simulation parameters
# Returns:
Probability of detecting planet (if it transits) given the impact parameter of the planet's orbit
"""
function calc_prob_detect_if_transit_with_actual_b(t::KeplerTarget, s::Integer, p::Integer, sim_param::SimParam)
period = t.sys[s].orbit[p].P
size_ratio = t.sys[s].planet[p].radius/t.sys[s].star.radius
depth = calc_transit_depth(t,s,p)
duration = calc_transit_duration_eff(t,s,p)
b = calc_impact_parameter(t.sys[s],p)
snr_correction = calc_depth_correction_for_grazing_transit(b,size_ratio)
depth *= snr_correction
kepid = StellarTable.star_table(t.sys[s].star.id, :kepid)
osd_duration = get_durations_searched_Kepler(period,duration) #tests if durations are included in Kepler's observations for a certain planet period. If not, returns nearest possible duration
osd = WindowFunction.interp_OSD_from_table(kepid, period, osd_duration)
if osd_duration > duration #use a correcting factor if this duration is lower than the minimum searched for this period.
osd = osd*osd_duration/duration
end
ntr = calc_expected_num_transits(t,s,p,sim_param)
calc_prob_detect_if_transit(t,depth,period,duration,osd, sim_param, num_transit=ntr)
end
"""
calc_prob_detect_if_transit_with_actual_b_cdpp(t, s, p, sim_param)
Calculate probability of detecting planet signal (if planet transits) at transit center using CDPP and the impact parameter of the provided orbit.
# Arguments:
- `t::KeplerTarget`: Kepler target object
- `s::Integer`: Star index within Kepler target
- `p::Integer`: Planet index within Kepler target
- `sim_param::SimParam`: Simulation parameters
# Returns:
Probability of detecting planet (if it transits) given the impact parameter of the planet's orbit
"""
function calc_prob_detect_if_transit_with_actual_b_cdpp(t::KeplerTarget, s::Integer, p::Integer, sim_param::SimParam)
period = t.sys[s].orbit[p].P
size_ratio = t.sys[s].planet[p].radius/t.sys[s].star.radius
depth = calc_transit_depth(t,s,p)
duration = calc_transit_duration_eff(t,s,p)
b = calc_impact_parameter(t.sys[s],p)
snr_correction = calc_depth_correction_for_grazing_transit(b,size_ratio)
depth *= snr_correction
cdpp = interpolate_cdpp_to_duration(t, duration)
ntr = calc_expected_num_transits(t,s,p,sim_param)
calc_prob_detect_if_transit_cdpp(t,depth,period,duration,cdpp, sim_param, num_transit=ntr)
end
"""
calc_ave_prob_detect_if_transit_from_snr(t, snr_central, period, duration_central, size_ratio, osd_central, sim_param; num_transit = 1)
Calculate probability of detecting planet signal (if planet transits) using 1-sigma depth (OSD) function and averaged over impact parameters b~U[0,1).
# NOTE: Assumes OSD functions already read in.
# Arguments:
- `t::KeplerTarget`: Kepler target object
- `snr_central::Float64`: Expected multiple event statistic (signal-to-noise ratio) at transit center
- `period::Float64`: Orbital period (in days)
- `duration_central::Float64`: Transit duration (in days) at transit center
- `size_ratio::Float64`: Ratio of planet-to-star radii
- `osd_central::Float64`: OSD for target star given transit duration at transit center and period
- `sim_param::SimParam`: Simulation parameters
- `num_transit::Float64 = 1`: Expected number of transits observed
# Returns:
Probability of detecting planet (if it transits) averaged over impact parameter
"""
function calc_ave_prob_detect_if_transit_from_snr(t::KeplerTarget, snr_central::Float64, period::Float64, duration_central::Float64, size_ratio::Float64, osd_central::Float64, sim_param::SimParam; num_transit::Float64 = 1)
min_pdet_nonzero = 1.0e-4
wf = kepler_window_function(t, num_transit, period, duration_central)
detection_efficiency_central = detection_efficiency_model(snr_central, num_transit, min_pdet_nonzero=min_pdet_nonzero)
if wf*detection_efficiency_central <= min_pdet_nonzero
return 0.
end
# Breaking integral into two sections [0,1-b_boundary) and [1-b_boundary,1], so need at least 5 points to evaluate integral via trapezoid rule
num_impact_param_low_b = 7 # Number of points to evaluate integral over [0,1-b_boundary) via trapezoid rule
num_impact_param_high_b = 5 # (size_ratio<=0.05) ? 5 : 11 # Number of points to evaluate integral over [1-b_boundary,1] via trapezoid rule. If using 2*size_ratio for the boundary for small planets, then keep this odd, so one point lands on 1-size_ratio.
@assert(num_impact_param_low_b >= 5)
@assert(num_impact_param_high_b >= 3)
num_impact_param = num_impact_param_low_b+num_impact_param_high_b-1 # One point is shared
b_boundary = (size_ratio <= 0.15) ? 2*size_ratio : min(max(0.3,size_ratio),0.5)
b = Array{Float64}(undef,num_impact_param)
weight = Array{Float64}(undef,num_impact_param)
b[1:num_impact_param_low_b] = range(0.0,stop=1-b_boundary,length=num_impact_param_low_b)
b[num_impact_param_low_b:num_impact_param] .= range(1-b_boundary,stop=1.0,length=num_impact_param_high_b)
weight[1:num_impact_param_low_b] .= (1-b_boundary)/(num_impact_param_low_b-1) # Points for first integral
weight[1] *= 0.5 # Lower endpoint of first integral
weight[num_impact_param_low_b] *= 0.5 # Upper endpoint of first integral
weight[num_impact_param_low_b] += 0.5*(b_boundary)/(num_impact_param_high_b-1) # Also lower endpoint of second integral
weight[(num_impact_param_low_b+1):num_impact_param] .= b_boundary/(num_impact_param_high_b-1)
weight[num_impact_param] *= 0.5 # Upper endpoint of second integral
@assert isapprox(sum(weight),1.0)
function integrand(b::Float64)::Float64
depth_factor = calc_depth_correction_for_grazing_transit(b,size_ratio)
duration_factor = calc_transit_duration_eff_factor_for_impact_parameter_b(b,size_ratio)
kepid = StellarTable.star_table(t.sys[1].star.id, :kepid)
osd_duration = get_durations_searched_Kepler(period,duration_central*duration_factor) #tests if durations are included in Kepler's observations for a certain planet period. If not, returns nearest possible duration
osd = WindowFunction.interp_OSD_from_table(kepid, period, osd_duration)
if osd_duration > duration_central*duration_factor #use a correcting factor if this duration is lower than the minimum searched for this period.
osd = osd*osd_duration/(duration_central*duration_factor)
end
snr_factor = depth_factor*(osd_central/osd)
detection_efficiency_model(snr_central*snr_factor, num_transit, min_pdet_nonzero=min_pdet_nonzero)
end
ave_detection_efficiency = sum(weight .* map(integrand,b)::Vector{Float64} )
return wf*ave_detection_efficiency
end
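# Standalone sketch of the two-segment trapezoid rule weights constructed above
# (illustrative only, not called by the package; the 7 + 5 point split matches
# the defaults above). Useful for checking that the weights form a valid
# quadrature rule on [0,1].
function _example_impact_parameter_weights(b_boundary::Float64 = 0.3)
    nlo, nhi = 7, 5                     # points on [0,1-b_boundary] and [1-b_boundary,1]
    n = nlo + nhi - 1                   # one point is shared
    w = zeros(n)
    w[1:nlo] .= (1 - b_boundary)/(nlo - 1)
    w[1] *= 0.5                         # lower endpoint of first integral
    w[nlo] *= 0.5                       # upper endpoint of first integral
    w[nlo] += 0.5*b_boundary/(nhi - 1)  # also lower endpoint of second integral
    w[(nlo+1):n] .= b_boundary/(nhi - 1)
    w[n] *= 0.5                         # upper endpoint of second integral
    @assert isapprox(sum(w), 1.0)
    return w
end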
"""
calc_ave_prob_detect_if_transit_from_snr_cdpp(t, snr_central, period, duration_central, size_ratio, cdpp_central, sim_param; num_transit = 1)
Calculate probability of detecting planet signal (if planet transits) using CDPP and averaged over impact parameters b~U[0,1).
# Arguments:
- `t::KeplerTarget`: Kepler target object
- `snr_central::Float64`: Expected multiple event statistic (signal-to-noise ratio) at transit center
- `period::Float64`: Orbital period (in days)
- `duration_central::Float64`: Transit duration (in days) at transit center
- `size_ratio::Float64`: Ratio of planet-to-star radii
- `cdpp_central::Float64`: CDPP for target star given transit duration at transit center
- `sim_param::SimParam`: Simulation parameters
- `num_transit::Float64 = 1`: Expected number of transits observed
# Returns:
Probability of detecting planet (if it transits) averaged over impact parameter
"""
function calc_ave_prob_detect_if_transit_from_snr_cdpp(t::KeplerTarget, snr_central::Float64, period::Float64, duration_central::Float64, size_ratio::Float64, cdpp_central::Float64, sim_param::SimParam; num_transit::Float64 = 1)
min_pdet_nonzero = 1.0e-4
wf = kepler_window_function(t, num_transit, period, duration_central)
detection_efficiency_central = detection_efficiency_model(snr_central, num_transit, min_pdet_nonzero=min_pdet_nonzero)
if wf*detection_efficiency_central <= min_pdet_nonzero
return 0.
end
# Breaking integral into two sections [0,1-b_boundary) and [1-b_boundary,1], so need at least 5 points to evaluate integral via trapezoid rule
num_impact_param_low_b = 7 # Number of points to evaluate integral over [0,1-b_boundary) via trapezoid rule
num_impact_param_high_b = 5 # (size_ratio<=0.05) ? 5 : 11 # Number of points to evaluate integral over [1-b_boundary,1] via trapezoid rule. If using 2*size_ratio for the boundary for small planets, then keep this odd, so one point lands on 1-size_ratio.
@assert(num_impact_param_low_b >= 5)
@assert(num_impact_param_high_b >= 3)
num_impact_param = num_impact_param_low_b+num_impact_param_high_b-1 # One point is shared
b_boundary = (size_ratio <= 0.15) ? 2*size_ratio : min(max(0.3,size_ratio),0.5)
b = Array{Float64}(undef,num_impact_param)
weight = Array{Float64}(undef,num_impact_param)
b[1:num_impact_param_low_b] = range(0.0,stop=1-b_boundary,length=num_impact_param_low_b)
b[num_impact_param_low_b:num_impact_param] .= range(1-b_boundary,stop=1.0,length=num_impact_param_high_b)
weight[1:num_impact_param_low_b] .= (1-b_boundary)/(num_impact_param_low_b-1) # Points for first integral
weight[1] *= 0.5 # Lower endpoint of first integral
weight[num_impact_param_low_b] *= 0.5 # Upper endpoint of first integral
weight[num_impact_param_low_b] += 0.5*(b_boundary)/(num_impact_param_high_b-1) # Also lower endpoint of second integral
weight[(num_impact_param_low_b+1):num_impact_param] .= b_boundary/(num_impact_param_high_b-1)
weight[num_impact_param] *= 0.5 # Upper endpoint of second integral
@assert isapprox(sum(weight),1.0)
function integrand(b::Float64)::Float64
depth_factor = calc_depth_correction_for_grazing_transit(b,size_ratio)
duration_factor = calc_transit_duration_eff_factor_for_impact_parameter_b(b,size_ratio)
cdpp = interpolate_cdpp_to_duration(t,duration_central*duration_factor)
snr_factor = depth_factor*sqrt(duration_factor)*(cdpp_central/cdpp)
detection_efficiency_model(snr_central*snr_factor, num_transit, min_pdet_nonzero=min_pdet_nonzero)
end
ave_detection_efficiency = sum(weight .* map(integrand,b)::Vector{Float64} )
return wf*ave_detection_efficiency
end
"""
calc_ave_prob_detect_if_transit_cdpp(t, depth, period, duration_central, size_ratio, sim_param; num_transit = 1)
calc_ave_prob_detect_if_transit_cdpp(t, s, p, sim_param)
Calculate probability of detecting planet signal (if planet transits) using CDPP and averaged over impact parameters b~U[0,1).
# Arguments:
- `t::KeplerTarget`: Kepler target object
- `depth::Float64`: (Fractional) transit depth
- `period::Float64`: Orbital period (in days)
- `duration_central::Float64`: Transit duration (in days) at transit center
- `size_ratio::Float64`: Ratio of planet-to-star radii
- `s::Integer`: Star index within Kepler target
- `p::Integer`: Planet index within Kepler target
- `sim_param::SimParam`: Simulation parameters
- `num_transit::Float64 = 1`: Expected number of transits observed
# Returns:
Probability of detecting planet (if it transits) averaged over impact parameter
"""
function calc_ave_prob_detect_if_transit_cdpp(t::KeplerTarget, depth::Float64, period::Float64, duration_central::Float64, size_ratio::Float64, sim_param::SimParam; num_transit::Float64 = 1)
cdpp_central = interpolate_cdpp_to_duration(t, duration_central)
snr_central = calc_snr_if_transit(t,depth,duration_central,cdpp_central, sim_param, num_transit=num_transit)
return calc_ave_prob_detect_if_transit_from_snr_cdpp(t, snr_central, period, duration_central, size_ratio, cdpp_central, sim_param, num_transit=num_transit)
end
function calc_ave_prob_detect_if_transit_cdpp(t::KeplerTarget, s::Integer, p::Integer, sim_param::SimParam)
size_ratio = t.sys[s].planet[p].radius/t.sys[s].star.radius
depth = calc_transit_depth(t,s,p)
period = t.sys[s].orbit[p].P
duration_central = calc_transit_duration_eff_central(t,s,p)
ntr = calc_expected_num_transits(t,s,p,sim_param)
calc_ave_prob_detect_if_transit_cdpp(t,depth,period,duration_central, size_ratio, sim_param, num_transit=ntr)
end
## ExoplanetsSysSim/src/transit_observations.jl
## (c) 2015 Eric B. Ford
#using Distributions
#include("constants.jl")
#include("newPDST.jl") #includes a function to calculate if given durations match those observed by Kepler for a given period
# Needed if using full noise model
using LinearAlgebra
using PDMats
# Starting Section of Observables that are actually used
struct TransitPlanetObs
# ephem::ephemeris_type # For now hardcode P and t0, see transit_observation_unused.jl to reinstate
period::Float64 # days
t0::Float64 # days
depth::Float64 # fractional
duration::Float64 # days; Full-width, half-max-duration until further notice
# ingress_duration::Float64 # days; QUERY: Will we want to use the ingress/egress duration for anything?
end
TransitPlanetObs() = TransitPlanetObs(0.0,0.0,0.0,0.0)
struct StarObs
radius::Float64 # in Rsol
mass::Float64 # in Msol
id::Int64 # row number in stellar dataframe
end
period(obs::TransitPlanetObs) = obs.period
depth(obs::TransitPlanetObs) = obs.depth
duration(obs::TransitPlanetObs) = obs.duration
period_given_semimajor_axis(a::Float64, M::Float64) = sqrt((4pi^2*a^3)/(grav_const*M))
"""
semimajor_axis(P, M)
semimajor_axis(ps, id)
Calculate semimajor axis
# Arguments:
- `P::Float64`: Orbital period (in days)
- `M::Float64`: Total mass of star and planet (in M_sol)
- `ps::PlanetarySystemAbstract`: Planetary system object
- `id::Integer`: Planet/orbit index in planetary system object
# Returns:
Semimajor axis of planet (in AU)
"""
semimajor_axis(P::Float64, M::Float64) = (grav_const/(4pi^2)*M*P*P)^(1/3)
function semimajor_axis(ps::PlanetarySystemAbstract, id::Integer)
M = mass(ps.star) + ps.planet[id].mass # TODO SCI DETAIL: Replace with Jacobi mass? Not important unless start including TTVs, even then unlikely to matter
@assert(M>0.0)
@assert(ps.orbit[id].P>0.0)
return semimajor_axis(ps.orbit[id].P,M)
end
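# Minimal sanity check (illustrative only, not part of the package API); assumes
# grav_const from constants.jl is in AU^3/(M_sol day^2), so an Earth analog
# around a 1 M_sol star should land near 1 AU.
function _example_semimajor_axis_earth_analog()
    P = 365.25  # days
    M = 1.0     # M_sol (star + planet; planet mass negligible)
    a = semimajor_axis(P, M)
    @assert isapprox(a, 1.0, atol = 0.01)
    return a
end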
"""
calc_transit_depth(t, s, p)
Calculate fractional transit depth of planet transiting host star (including limb darkening)
# Arguments:
- `t::KeplerTarget`: Kepler target object
- `s::Integer`: Index of star in Kepler target
- `p::Integer`: Index of planet in Kepler target
# Returns:
Fractional transit depth at transit midpoint
"""
function calc_transit_depth(t::KeplerTarget, s::Integer, p::Integer) # WARNING: IMPORTANT: Assumes non-grazing transit
radius_ratio = t.sys[s].planet[p].radius/t.sys[s].star.radius
#b = calc_impact_parameter(t.sys[s].planet, p) # If limb darkening should know about which chord the planet takes set b to impact parameter, rather than 0.0.
depth = depth_at_midpoint(radius_ratio, t.sys[s].star.ld) # Includes limb darkening
depth *= flux(t.sys[s].star)/flux(t) # Flux ratio accounts for dilution
end
"""
calc_transit_duration_central_circ_small_angle_approx(ps, pl)
calc_transit_duration_central_circ_small_angle_approx(t, s, p)
Calculate transit duration of planet across host star disk if it transits across the center of the stellar disk assuming a circular orbit using the small angle approximation
# Arguments:
- `ps::PlanetarySystemAbstract`: Planetary system object
- `pl::Integer`: Planet/orbit index in planetary system object
- `t::KeplerTarget`: Kepler target object
- `s::Integer`: Index of star in Kepler target
- `p::Integer`: Index of planet in Kepler target
# Returns:
Transit duration (in days) if planet transits across center of stellar disk
"""
function calc_transit_duration_central_circ_small_angle_approx(ps::PlanetarySystemAbstract, pl::Integer)
duration = rsol_in_au*ps.star.radius * ps.orbit[pl].P /(pi*semimajor_axis(ps,pl) )
end
calc_transit_duration_central_circ_small_angle_approx(t::KeplerTarget, s::Integer, p::Integer) = calc_transit_duration_central_circ_small_angle_approx(t.sys[s],p)
"""
calc_transit_duration_central_circ_with_arcsin(ps, pl)
calc_transit_duration_central_circ_with_arcsin(t, s, p)
Calculate transit duration of planet across host star disk if it transits across the center of the stellar disk assuming a circular orbit without the small angle approximation
# Arguments:
- `ps::PlanetarySystemAbstract`: Planetary system object
- `pl::Integer`: Planet/orbit index in planetary system object
- `t::KeplerTarget`: Kepler target object
- `s::Integer`: Index of star in Kepler target
- `p::Integer`: Index of planet in Kepler target
# Returns:
Transit duration (in days) if planet transits across center of stellar disk
"""
function calc_transit_duration_central_circ_with_arcsin(ps::PlanetarySystemAbstract, pl::Integer)
asin_arg = rsol_in_au*ps.star.radius/semimajor_axis(ps,pl)
duration = ps.orbit[pl].P/pi * (asin_arg < 1.0 ? asin(asin_arg) : 1.0)
end
calc_transit_duration_central_circ_with_arcsin(t::KeplerTarget, s::Integer, p::Integer) = calc_transit_duration_central_circ_with_arcsin(t.sys[s],p)
"""
calc_transit_duration_central_circ(ps, pl)
calc_transit_duration_central_circ(t, s, p)
Wrapper function to calculate transit duration of planet across host star disk if it transits across the center of the stellar disk assuming a circular orbit
# Arguments:
- `ps::PlanetarySystemAbstract`: Planetary system object
- `pl::Integer`: Planet/orbit index in planetary system object
- `t::KeplerTarget`: Kepler target object
- `s::Integer`: Index of star in Kepler target
- `p::Integer`: Index of planet in Kepler target
# Returns:
Transit duration (in days) if planet transits across center of stellar disk
"""
#calc_transit_duration_central_circ(ps::PlanetarySystemAbstract, pl::Integer) = calc_transit_duration_central_circ_small_angle_approx(ps,pl)
calc_transit_duration_central_circ(ps::PlanetarySystemAbstract, pl::Integer) = calc_transit_duration_central_circ_with_arcsin(ps,pl)
calc_transit_duration_central_circ(t::KeplerTarget, s::Integer, p::Integer) = calc_transit_duration_central_circ(t.sys[s],p)
"""
calc_transit_duration_central_small_angle_approx(ps, pl)
calc_transit_duration_central_small_angle_approx(t, s, p)
Calculate transit duration of planet across host star disk if it transits across the center of the stellar disk using the small angle approximation
# Arguments:
- `ps::PlanetarySystemAbstract`: Planetary system object
- `pl::Integer`: Planet/orbit index in planetary system object
- `t::KeplerTarget`: Kepler target object
- `s::Integer`: Index of star in Kepler target
- `p::Integer`: Index of planet in Kepler target
# Returns:
Transit duration (in days) if planet transits across center of stellar disk
"""
function calc_transit_duration_central_small_angle_approx(ps::PlanetarySystemAbstract, pl::Integer)
ecc = ps.orbit[pl].ecc
sqrt_one_minus_ecc_sq = sqrt((1+ecc)*(1-ecc))
one_plus_e_sin_w = 1+ecc*sin(ps.orbit[pl].omega)
vel_fac = sqrt_one_minus_ecc_sq/one_plus_e_sin_w
duration = calc_transit_duration_central_circ_small_angle_approx(ps,pl) * vel_fac
end
calc_transit_duration_central_small_angle_approx(t::KeplerTarget, s::Integer, p::Integer) = calc_transit_duration_central_small_angle_approx(t.sys[s],p)
"""
calc_transit_duration_central_winn2010(ps, pl)
calc_transit_duration_central_winn2010(t, s, p)
Calculate transit duration of planet across host star disk if it transits across the center of the stellar disk using the formulation from Winn (2010)
# Arguments:
- `ps::PlanetarySystemAbstract`: Planetary system object
- `pl::Integer`: Planet/orbit index in planetary system object
- `t::KeplerTarget`: Kepler target object
- `s::Integer`: Index of star in Kepler target
- `p::Integer`: Index of planet in Kepler target
# Returns:
Transit duration (in days) if planet transits across center of stellar disk
"""
function calc_transit_duration_central_winn2010(ps::PlanetarySystemAbstract, pl::Integer)
ecc = ps.orbit[pl].ecc
sqrt_one_minus_ecc_sq = sqrt((1+ecc)*(1-ecc))
one_plus_e_sin_w = 1+ecc*sin(ps.orbit[pl].omega)
vel_fac = sqrt_one_minus_ecc_sq/one_plus_e_sin_w
radial_separation_over_a = (1+ecc)*(1-ecc)/one_plus_e_sin_w
asin_arg = rsol_in_au*ps.star.radius/(semimajor_axis(ps,pl))
# Based on Winn 2010
duration = ( asin_arg<1.0 ? asin(asin_arg) : 1.0 ) * ps.orbit[pl].P*radial_separation_over_a/(pi*sqrt_one_minus_ecc_sq)
end
calc_transit_duration_central_winn2010(t::KeplerTarget, s::Integer, p::Integer) = calc_transit_duration_central_winn2010(t.sys[s],p)
"""
calc_transit_duration_central_kipping2010(ps, pl)
calc_transit_duration_central_kipping2010(t, s, p)
Calculate transit duration of planet across host star disk if it transits across the center of the stellar disk using the formulation from Kipping (2010)
# Arguments:
- `ps::PlanetarySystemAbstract`: Planetary system object
- `pl::Integer`: Planet/orbit index in planetary system object
- `t::KeplerTarget`: Kepler target object
- `s::Integer`: Index of star in Kepler target
- `p::Integer`: Index of planet in Kepler target
# Returns:
Transit duration (in days) if planet transits across center of stellar disk
"""
function calc_transit_duration_central_kipping2010(ps::PlanetarySystemAbstract, pl::Integer)
ecc = ps.orbit[pl].ecc
sqrt_one_minus_ecc_sq = sqrt((1+ecc)*(1-ecc))
one_plus_e_sin_w = 1+ecc*sin(ps.orbit[pl].omega)
vel_fac = sqrt_one_minus_ecc_sq/one_plus_e_sin_w
radial_separation_over_a = (1+ecc)*(1-ecc)/one_plus_e_sin_w
# Based on pasting cos i = 0 into Eqn 15 from Kipping 2010
asin_arg = rsol_in_au*ps.star.radius/(semimajor_axis(ps,pl)* radial_separation_over_a)
duration = ps.orbit[pl].P*radial_separation_over_a^2/(pi*sqrt_one_minus_ecc_sq) * ( asin_arg<1.0 ? asin(asin_arg) : 1.0 )
end
calc_transit_duration_central_kipping2010(t::KeplerTarget, s::Integer, p::Integer) = calc_transit_duration_central_kipping2010(t.sys[s],p)
"""
calc_transit_duration_central(ps, pl)
calc_transit_duration_central(t, s, p)
Wrapper function to calculate transit duration of planet across host star disk if it transits across the center of the stellar disk
# Arguments:
- `ps::PlanetarySystemAbstract`: Planetary system object
- `pl::Integer`: Planet/orbit index in planetary system object
- `t::KeplerTarget`: Kepler target object
- `s::Integer`: Index of star in Kepler target
- `p::Integer`: Index of planet in Kepler target
# Returns:
Transit duration (in days) if planet transits across center of stellar disk
"""
#calc_transit_duration_central(ps::PlanetarySystemAbstract, pl::Integer) = calc_transit_duration_central_small_angle_approx(ps,pl)
#calc_transit_duration_central(ps::PlanetarySystemAbstract, pl::Integer) = calc_transit_duration_central_winn2010(ps,pl)
calc_transit_duration_central(ps::PlanetarySystemAbstract, pl::Integer) = calc_transit_duration_central_kipping2010(ps,pl)
calc_transit_duration_central(t::KeplerTarget, s::Integer, p::Integer) = calc_transit_duration_central(t.sys[s],p)
calc_transit_duration_eff_central(t::KeplerTarget, s::Integer, p::Integer) = calc_transit_duration_central(t.sys[s],p)
"""
calc_transit_duration_factor_for_impact_parameter_b(b, p)
Calculate scaling factor to convert central transit duration to transit duration given an orbit with impact parameter b
NOTE: Used for single observations of transit duration
# Arguments:
- `b::Real`: Impact parameter
- `p::Real`: Planet-to-star radius ratio
# Returns:
Transit duration scaling factor
"""
function calc_transit_duration_factor_for_impact_parameter_b(b::T, p::T) where T <:Real
@assert(zero(b)<=b) # b = Impact Parameter
@assert(zero(p)<=p<one(p)) # p = R_p/R_star
if b < 1-p
return sqrt((1-b)*(1+b)) # Approximation to (sqrt((1+p)^2-b^2)+sqrt((1-p)^2-b^2))/2, which is itself an approximation
else
return zero(b)
end
end
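# Quick numeric checks (illustrative values): the chord factor is 1 for a central
# transit and shrinks as b grows toward 1-p, e.g.
# calc_transit_duration_factor_for_impact_parameter_b(0.0, 0.01) # == 1.0
# calc_transit_duration_factor_for_impact_parameter_b(0.8, 0.01) # == sqrt(0.36) = 0.6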
"""
calc_transit_duration_eff_factor_for_impact_parameter_b(b, p)
Calculate scaling factor to convert central transit duration to transit duration given an orbit with impact parameter b
NOTE: Used for single observations of effective transit duration for use in SNR calculations.
# Arguments:
- `b::Real`: Impact parameter
- `p::Real`: Planet-to-star radius ratio
# Returns:
Transit duration scaling factor
"""
function calc_transit_duration_eff_factor_for_impact_parameter_b(b::T, p::T) where T <:Real
@assert(zero(b)<=b) # b = Impact Parameter
@assert(zero(p)<=p<one(p)) # p = R_p/R_star
if b < 1-3p # Far enough from grazing for approximation
#duration_ratio = sqrt(1-b^2) # Approximation to (sqrt((1+p)^2-b^2)+sqrt((1-p)^2-b^2))/2, which is itself an approximation
duration_ratio = sqrt((1-b)*(1+b)) # Approximation to (sqrt((1+p)^2-b^2)+sqrt((1-p)^2-b^2))/2, which is itself an approximation
elseif b < 1-p # Planet is fully inscribed at mid-transit
#duration_ratio = (sqrt((1+p)^2-b^2)+sqrt((1-p)^2-b^2))/2 # Average of full and flat transit durations approximates to duration between center of planet being over limb of star
duration_ratio = (sqrt(((1+p)+b)*((1+p)-b))+sqrt(((1-p)+b)*((1-p)-b)))/2 # Average of full and flat transit durations approximates to duration between center of planet being over limb of star
elseif b < 1+p # Planet never fully inscribed by star
#duration_ratio = sqrt((1+p)^2-b^2)/2 # /2 since now triangular
duration_ratio = sqrt(((1+p)+b)*((1+p)-b))/2 # /2 since now triangular
else # There's no transit
duration_ratio = zero(b)
end
return duration_ratio
end
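# Illustrative check of the three regimes above (p = 0.1 chosen arbitrarily;
# not called by the package):
function _example_transit_duration_eff_factor()
    p = 0.1
    @assert calc_transit_duration_eff_factor_for_impact_parameter_b(0.0, p) == 1.0       # far from grazing
    @assert 0.0 < calc_transit_duration_eff_factor_for_impact_parameter_b(1.0, p) < 1.0  # grazing
    @assert calc_transit_duration_eff_factor_for_impact_parameter_b(1.2, p) == 0.0       # no transit
    return nothing
end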
"""
calc_effective_transit_duration_factor_for_impact_parameter_b(b, p)
Calculate scaling factor to convert central transit duration to transit duration given an orbit with impact parameter b
NOTE: Used for sky-averaging observations where multiple transit durations are sampled for different values of b.
# Arguments:
- `b::Real`: Impact parameter
- `p::Real`: Planet-to-star radius ratio
# Returns:
Transit duration scaling factor
"""
function calc_effective_transit_duration_factor_for_impact_parameter_b(b::T, p::T) where T <:Real
@assert(zero(b)<=b) # b = Impact Parameter
@assert(zero(p)<=p<one(p)) # p = R_p/R_star
if b < 1-3p # Far enough from grazing for approximation
duration_ratio = sqrt((1+b)*(1-b)) # Approximation to (sqrt((1+p)^2-b^2)+sqrt((1-p)^2-b^2))/2, which is itself an approximation
area_ratio = one(p)
elseif b < 1-p # Planet is fully inscribed at mid-transit
#duration_ratio = (sqrt((1+p)^2-b^2)+sqrt((1-p)^2-b^2))/2 # Average of full and flat transit durations approximates to duration between center of planet being over limb of star
duration_ratio = (sqrt(((1+p)+b)*((1+p)-b))+sqrt(((1-p)+b)*((1-p)-b)))/2 # Average of full and flat transit durations approximates to duration between center of planet being over limb of star
area_ratio = one(p)
elseif b < 1+p # Planet never fully inscribed by star
#duration_ratio = sqrt((1+p)^2-b^2)/2 # /2 since now triangular
duration_ratio = sqrt(((1+p)+b)*((1+p)-b))/2 # /2 since now triangular
#area_ratio = (p^2*acos((b^2+p^2-1)/(2*b*p))+acos((b^2+1-p^2)/(2b))-0.5*sqrt((1+p-b)*(p+b-1)*(1-p+b)*(1+p+b))) / (pi*p^2)
#area_ratio = (p^2*acos((b^2-(1-p)*(1+p))/(2*b*p))+acos((b^2+(1+p)*(1-p))/(2b))-0.5*sqrt((1+p-b)*(p+b-1)*(1-p+b)*(1+p+b))) / (pi*p^2)
acos_arg1 = max(-1.0,min(1.0,(b^2-(1-p)*(1+p))/(2*b*p)))
acos_arg2 = max(-1.0,min(1.0,(b^2+(1+p)*(1-p))/(2b)))
sqrt_arg = max(0.0,(1+p-b)*(p+b-1)*(1-p+b)*(1+p+b))
area_ratio = (p^2*acos(acos_arg1)+acos(acos_arg2)-0.5*sqrt(sqrt_arg)) / (pi*p^2)
else # There's no transit
duration_ratio = zero(b)
area_ratio = zero(p)
end
return duration_ratio*area_ratio
end
"""
calc_depth_correction_for_grazing_transit(b, p)
Calculate scaling factor to correct transit depth to account for grazing transits when only part of the planet disk blocks starlight at mid-transit. Source: Eqn 14 of http://mathworld.wolfram.com/Circle-CircleIntersection.html
NOTE: Assumes uniform surface brightness star
# Arguments:
- `b::Real`: Impact parameter
- `p::Real`: Planet-to-star radius ratio
# Returns:
Transit depth scaling factor
"""
function calc_depth_correction_for_grazing_transit(b::T, p::T) where T <:Real
@assert(zero(b)<=b) # b = Impact Parameter
@assert(zero(p)<=p<one(p)) # p = R_p/R_star
if b < 1-p # Planet fully inscribed by star
area_ratio = one(p)
elseif b < 1+p # Planet never fully inscribed by star
#area_ratio = (p^2*acos((b^2+p^2-1)/(2*b*p))+acos((b^2+1-p^2)/(2b))-0.5*sqrt((1+p-b)*(p+b-1)*(1-p+b)*(1+p+b))) / (pi*p^2)
acos_arg1 = max(-1.0,min(1.0,(b^2-(1-p)*(1+p))/(2*b*p)))
acos_arg2 = max(-1.0,min(1.0,(b^2+(1+p)*(1-p))/(2b)))
sqrt_arg = max(0.0,(1+p-b)*(p+b-1)*(1-p+b)*(1+p+b))
area_ratio = (p^2*acos(acos_arg1)+acos(acos_arg2)-0.5*sqrt(sqrt_arg)) / (pi*p^2)
else # There's no transit
area_ratio = zero(p)
end
return area_ratio
end
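# Illustrative check (p = 0.1 chosen arbitrarily; not called by the package):
# full overlap gives 1, a limb-crossing planet a fraction, and no overlap 0.
function _example_grazing_depth_correction()
    p = 0.1
    @assert calc_depth_correction_for_grazing_transit(0.0, p) == 1.0      # planet fully on stellar disk
    @assert 0.0 < calc_depth_correction_for_grazing_transit(1.0, p) < 1.0 # planet straddles the limb
    @assert calc_depth_correction_for_grazing_transit(1.2, p) == 0.0      # planet misses the disk
    return nothing
end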
# Transit durations to be used for observations of transit duration
"""
calc_transit_duration_small_angle_approx(ps, pl)
calc_transit_duration_small_angle_approx(t, s, p)
Calculate transit duration of planet across host star disk using the small angle approximation
# Arguments:
- `ps::PlanetarySystemAbstract`: Planetary system object
- `pl::Integer`: Planet/orbit index in planetary system object
- `t::KeplerTarget`: Kepler target object
- `s::Integer`: Index of star in Kepler target
- `p::Integer`: Index of planet in Kepler target
# Returns:
Transit duration (in days)
"""
function calc_transit_duration_small_angle_approx(ps::PlanetarySystemAbstract, pl::Integer)
a = semimajor_axis(ps,pl)
@assert a>=zero(a)
ecc = ps.orbit[pl].ecc
@assert zero(ecc)<=ecc<=one(ecc)
b = calc_impact_parameter(ps, pl)
size_ratio = ps.planet[pl].radius/ps.star.radius
@assert !isnan(b)
@assert zero(b)<=b
if b>one(b)-size_ratio
return zero(b)
end
duration_central_circ = calc_transit_duration_central_circ(ps,pl)
duration_ratio_for_impact_parameter = calc_transit_duration_factor_for_impact_parameter_b(b,size_ratio)
one_plus_e_sin_w = 1+ecc*sin(ps.orbit[pl].omega)
sqrt_one_minus_e_sq = sqrt((1+ecc)*(1-ecc))
vel_fac = sqrt_one_minus_e_sq / one_plus_e_sin_w
duration = duration_central_circ * duration_ratio_for_impact_parameter * vel_fac
end
calc_transit_duration_small_angle_approx(t::KeplerTarget, s::Integer, p::Integer ) = calc_transit_duration_small_angle_approx(t.sys[s],p)
"""
calc_transit_duration_winn2010(ps, pl)
calc_transit_duration_winn2010(t, s, p)
Calculate transit duration of planet across host star disk using the formulation from Winn (2010)
# Arguments:
- `ps::PlanetarySystemAbstract`: Planetary system object
- `pl::Integer`: Planet/orbit index in planetary system object
- `t::KeplerTarget`: Kepler target object
- `s::Integer`: Index of star in Kepler target
- `p::Integer`: Index of planet in Kepler target
# Returns:
Transit duration (in days)
"""
function calc_transit_duration_winn2010(ps::PlanetarySystemAbstract, pl::Integer)
a = semimajor_axis(ps,pl)
@assert a>=zero(a)
ecc = ps.orbit[pl].ecc
@assert zero(ecc)<=ecc<=one(ecc)
b = calc_impact_parameter(ps, pl)
size_ratio = ps.planet[pl].radius/ps.star.radius
@assert !isnan(b)
@assert zero(b)<=b
if b>one(b)-size_ratio
return zero(b)
end
duration_central_circ = calc_transit_duration_central_circ(ps,pl)
arcsin_circ_central = pi/ps.orbit[pl].P*duration_central_circ
one_plus_e_sin_w = 1+ecc*sin(ps.orbit[pl].omega)
sqrt_one_minus_e_sq = sqrt((1+ecc)*(1-ecc))
vel_fac = sqrt_one_minus_e_sq / one_plus_e_sin_w
radial_separation_over_a = (1+ecc)*(1-ecc)/one_plus_e_sin_w
duration_ratio_for_impact_parameter = calc_transit_duration_factor_for_impact_parameter_b(b,size_ratio)
# WARNING: This is technically an approximation. It avoids small angle for non-grazing transits, but does use a variant of the small angle approximation for nearly grazing transits.
asin_arg = (arcsin_circ_central * duration_ratio_for_impact_parameter)
duration = ps.orbit[pl].P/pi * radial_separation_over_a/sqrt_one_minus_e_sq * (asin_arg < 1.0 ? asin(asin_arg) : 1.0)
#duration = duration_central_circ * radial_separation_over_a/sqrt_one_minus_e_sq * (asin_arg < 1.0 ? asin(asin_arg) : 1.0)/arcsin_circ_central
end
calc_transit_duration_winn2010(t::KeplerTarget, s::Integer, p::Integer ) = calc_transit_duration_winn2010(t.sys[s],p)
"""
calc_transit_duration_kipping2010(ps, pl)
calc_transit_duration_kipping2010(t, s, p)
Calculate transit duration of planet across host star disk using the formulation from Kipping (2010)
# Arguments:
- `ps::PlanetarySystemAbstract`: Planetary system object
- `pl::Integer`: Planet/orbit index in planetary system object
- `t::KeplerTarget`: Kepler target object
- `s::Integer`: Index of star in Kepler target
- `p::Integer`: Index of planet in Kepler target
# Returns:
Transit duration (in days)
"""
function calc_transit_duration_kipping2010(ps::PlanetarySystemAbstract, pl::Integer)
a = semimajor_axis(ps,pl)
@assert a>=zero(a)
ecc = ps.orbit[pl].ecc
@assert zero(ecc)<=ecc<=one(ecc)
b = calc_impact_parameter(ps, pl)
size_ratio = ps.planet[pl].radius/ps.star.radius
@assert !isnan(b)
@assert zero(b)<=b
if b>one(b)-size_ratio
return zero(b)
end
duration_central_circ = calc_transit_duration_central_circ(ps,pl)
arcsin_circ_central = pi/ps.orbit[pl].P*duration_central_circ
one_plus_e_sin_w = 1+ecc*sin(ps.orbit[pl].omega)
sqrt_one_minus_e_sq = sqrt((1+ecc)*(1-ecc))
vel_fac = sqrt_one_minus_e_sq / one_plus_e_sin_w
radial_separation_over_a = (1+ecc)*(1-ecc)/one_plus_e_sin_w
duration_ratio_for_impact_parameter = calc_transit_duration_factor_for_impact_parameter_b(b,size_ratio)
# WARNING: This is technically an approximation (see Kipping 2010 Eqn 15). It avoids small angle for non-grazing transits, but does use a variant of the small angle approximation for nearly grazing transits.
asin_arg = (arcsin_circ_central * duration_ratio_for_impact_parameter/radial_separation_over_a)
duration = ps.orbit[pl].P/pi * radial_separation_over_a^2/sqrt_one_minus_e_sq * (asin_arg < 1.0 ? asin(asin_arg) : 1.0)
#duration = duration_central_circ * radial_separation_over_a^2/sqrt_one_minus_e_sq * (asin_arg < 1.0 ? asin(asin_arg) : 1.0)/arcsin_circ_central
end
calc_transit_duration_kipping2010(t::KeplerTarget, s::Integer, p::Integer ) = calc_transit_duration_kipping2010(t.sys[s],p)
"""
calc_transit_duration(ps, pl)
calc_transit_duration(t, s, p)
Wrapper function to calculate transit duration of planet across host star disk
# Arguments:
- `ps::PlanetarySystemAbstract`: Planetary system object
- `pl::Integer`: Planet/orbit index in planetary system object
- `t::KeplerTarget`: Kepler target object
- `s::Integer`: Index of star in Kepler target
- `p::Integer`: Index of planet in Kepler target
# Returns:
Transit duration (in days)
"""
#calc_transit_duration(ps::PlanetarySystemAbstract, pl::Integer) = calc_transit_duration_small_angle_approx(ps,pl)
#calc_transit_duration(ps::PlanetarySystemAbstract, pl::Integer) = calc_transit_duration_winn2010(ps,pl)
calc_transit_duration(ps::PlanetarySystemAbstract, pl::Integer) = calc_transit_duration_kipping2010(ps,pl)
calc_transit_duration(t::KeplerTarget, s::Integer, p::Integer ) = calc_transit_duration(t.sys[s],p)
# Effective transit durations to be used for SNR calculations
"""
calc_transit_duration_eff_small_angle_approx(ps, pl)
calc_transit_duration_eff_small_angle_approx(t, s, p)
Calculate effective transit duration of planet across host star disk using the small angle approximation
NOTE: To be used for SNR calculations
# Arguments:
- `ps::PlanetarySystemAbstract`: Planetary system object
- `pl::Integer`: Planet/orbit index in planetary system object
- `t::KeplerTarget`: Kepler target object
- `s::Integer`: Index of star in Kepler target
- `p::Integer`: Index of planet in Kepler target
# Returns:
Effective transit duration (in days)
"""
function calc_transit_duration_eff_small_angle_approx(ps::PlanetarySystemAbstract, pl::Integer)
a = semimajor_axis(ps,pl)
@assert a>=zero(a)
ecc = ps.orbit[pl].ecc
@assert zero(ecc)<=ecc<=one(ecc)
b = calc_impact_parameter(ps, pl)
size_ratio = ps.planet[pl].radius/ps.star.radius
@assert !isnan(b)
@assert zero(b)<=b
if b>one(b)+size_ratio
return zero(b)
end
duration_central_circ = calc_transit_duration_central_circ(ps,pl)
duration_ratio_for_impact_parameter = calc_transit_duration_eff_factor_for_impact_parameter_b(b,size_ratio)
one_plus_e_sin_w = 1+ecc*sin(ps.orbit[pl].omega)
sqrt_one_minus_e_sq = sqrt((1+ecc)*(1-ecc))
vel_fac = sqrt_one_minus_e_sq / one_plus_e_sin_w
duration = duration_central_circ * duration_ratio_for_impact_parameter * vel_fac
end
calc_transit_duration_eff_small_angle_approx(t::KeplerTarget, s::Integer, p::Integer ) = calc_transit_duration_eff_small_angle_approx(t.sys[s],p)
"""
calc_transit_duration_eff_winn2010(ps, pl)
calc_transit_duration_eff_winn2010(t, s, p)
Calculate effective transit duration of planet across host star disk using the formulation from Winn (2010)
NOTE: To be used for SNR calculations
# Arguments:
- `ps::PlanetarySystemAbstract`: Planetary system object
- `pl::Integer`: Planet/orbit index in planetary system object
- `t::KeplerTarget`: Kepler target object
- `s::Integer`: Index of star in Kepler target
- `p::Integer`: Index of planet in Kepler target
# Returns:
Effective transit duration (in days)
"""
function calc_transit_duration_eff_winn2010(ps::PlanetarySystemAbstract, pl::Integer)
a = semimajor_axis(ps,pl)
@assert a>=zero(a)
ecc = ps.orbit[pl].ecc
@assert zero(ecc)<=ecc<=one(ecc)
b = calc_impact_parameter(ps, pl)
size_ratio = ps.planet[pl].radius/ps.star.radius
@assert !isnan(b)
@assert zero(b)<=b
if b>one(b)+size_ratio
return zero(b)
end
duration_central_circ = calc_transit_duration_central_circ(ps,pl)
arcsin_circ_central = pi/ps.orbit[pl].P*duration_central_circ
one_plus_e_sin_w = 1+ecc*sin(ps.orbit[pl].omega)
sqrt_one_minus_e_sq = sqrt((1+ecc)*(1-ecc))
vel_fac = sqrt_one_minus_e_sq / one_plus_e_sin_w
radial_separation_over_a = (1+ecc)*(1-ecc)/one_plus_e_sin_w
duration_ratio_for_impact_parameter = calc_transit_duration_eff_factor_for_impact_parameter_b(b,size_ratio)
# WARNING: This is technically an approximation. It avoids small angle for non-grazing transits, but does use a variant of the small angle approximation for nearly grazing and grazing transits.
asin_arg = (arcsin_circ_central * duration_ratio_for_impact_parameter)
duration = ps.orbit[pl].P/pi * radial_separation_over_a/sqrt_one_minus_e_sq * (asin_arg < 1.0 ? asin(asin_arg) : 1.0)
#duration = duration_central_circ * radial_separation_over_a/sqrt_one_minus_e_sq * (asin_arg < 1.0 ? asin(asin_arg) : 1.0)/arcsin_circ_central
end
calc_transit_duration_eff_winn2010(t::KeplerTarget, s::Integer, p::Integer ) = calc_transit_duration_eff_winn2010(t.sys[s],p)
"""
calc_transit_duration_eff_kipping2010(ps, pl)
calc_transit_duration_eff_kipping2010(t, s, p)
Calculate effective transit duration of planet across host star disk using the formulation from Kipping (2010)
NOTE: To be used for SNR calculations
# Arguments:
- `ps::PlanetarySystemAbstract`: Planetary system object
- `pl::Integer`: Planet/orbit index in planetary system object
- `t::KeplerTarget`: Kepler target object
- `s::Integer`: Index of star in Kepler target
- `p::Integer`: Index of planet in Kepler target
# Returns:
Effective transit duration (in days)
"""
function calc_transit_duration_eff_kipping2010(ps::PlanetarySystemAbstract, pl::Integer)
a = semimajor_axis(ps,pl)
@assert a>=zero(a)
ecc = ps.orbit[pl].ecc
@assert zero(ecc)<=ecc<=one(ecc)
b = calc_impact_parameter(ps, pl)
size_ratio = ps.planet[pl].radius/ps.star.radius
@assert !isnan(b)
@assert zero(b)<=b
if b>one(b)+size_ratio
return zero(b)
end
duration_central_circ = calc_transit_duration_central_circ(ps,pl)
arcsin_circ_central = pi/ps.orbit[pl].P*duration_central_circ
one_plus_e_sin_w = 1+ecc*sin(ps.orbit[pl].omega)
sqrt_one_minus_e_sq = sqrt((1+ecc)*(1-ecc))
vel_fac = sqrt_one_minus_e_sq / one_plus_e_sin_w
radial_separation_over_a = (1+ecc)*(1-ecc)/one_plus_e_sin_w
duration_ratio_for_impact_parameter = calc_transit_duration_eff_factor_for_impact_parameter_b(b,size_ratio)
# WARNING: This is technically an approximation (see Kipping 2010 Eqn 15). It avoids small angle for non-grazing transits, but does use a variant of the small angle approximation for nearly grazing and grazing transits.
asin_arg = (arcsin_circ_central * duration_ratio_for_impact_parameter/radial_separation_over_a)
duration = ps.orbit[pl].P/pi * radial_separation_over_a^2/sqrt_one_minus_e_sq * (asin_arg < 1.0 ? asin(asin_arg) : 1.0)
#duration = duration_central_circ * radial_separation_over_a^2/sqrt_one_minus_e_sq * (asin_arg < 1.0 ? asin(asin_arg) : 1.0)/arcsin_circ_central
end
calc_transit_duration_eff_kipping2010(t::KeplerTarget, s::Integer, p::Integer ) = calc_transit_duration_eff_kipping2010(t.sys[s],p)
"""
calc_transit_duration_eff(ps, pl)
calc_transit_duration_eff(t, s, p)
Wrapper function to calculate effective transit duration of planet across host star disk to be used for SNR calculations
# Arguments:
- `ps::PlanetarySystemAbstract`: Planetary system object
- `pl::Integer`: Planet/orbit index in planetary system object
- `t::KeplerTarget`: Kepler target object
- `s::Integer`: Index of star in Kepler target
- `p::Integer`: Index of planet in Kepler target
# Returns:
Effective transit duration (in days)
"""
#calc_transit_duration_eff(ps::PlanetarySystemAbstract, pl::Integer) = calc_transit_duration_eff_small_angle_approx(ps,pl)
#calc_transit_duration_eff(ps::PlanetarySystemAbstract, pl::Integer) = calc_transit_duration_eff_winn2010(ps,pl)
calc_transit_duration_eff(ps::PlanetarySystemAbstract, pl::Integer) = calc_transit_duration_eff_kipping2010(ps,pl)
calc_transit_duration_eff(t::KeplerTarget, s::Integer, p::Integer ) = calc_transit_duration_eff(t.sys[s],p)
"""
calc_expected_num_transits(t, s, p, sim_param)
Calculate expected number of transits observed for planet
# Arguments:
- `t::KeplerTarget`: Kepler target object
- `s::Integer`: Index of star in Kepler target
- `p::Integer`: Index of planet in Kepler target
- `sim_param::SimParam`: Simulation parameter object
# Returns:
Effective (expected) number of transits
"""
function calc_expected_num_transits(t::KeplerTarget, s::Integer, p::Integer, sim_param::SimParam)
period = t.sys[s].orbit[p].P
exp_num_transits = t.duty_cycle * t.data_span/period
return exp_num_transits
end
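# e.g., for a hypothetical Kepler-like target with duty_cycle ≈ 0.88 and
# data_span ≈ 1459 days, a P = 365 day planet yields ≈ 3.5 expected transits.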
"""
get_durations_searched_Kepler(period, duration)
Returns appropriate transit duration for use with 1-sigma depth function (or CDPP) given the range of transit durations searched by the Kepler pipeline.
# Arguments:
- `period::Float64`: Orbital period (in days)
- `duration::Float64`: Transit duration (in days)
# Returns:
Valid transit duration searched by Kepler pipeline that is closest to the actual transit duration input to the function.
"""
#function get_legal_durations(period::Float64,duration::Float64)
function get_durations_searched_Kepler(period::Float64,duration::Float64)
num_dur = length(cdpp_durations) # 14
min_duration = 0.0
max_duration = 0.0
min_periods = [0.5, 0.52, 0.6517, 0.7824, 0.912, 1.178, 1.3056, 1.567, 1.952, 2.343, 2.75, 3.14, 3.257, 3.91]
max_periods = [50.045, 118.626, 231.69, 400.359, 635.76, 725, 725, 725, 725, 725, 725, 725, 725, 725]
i = 1
#determine what maximum and minimum durations were searched for this period
while min_duration == 0.0 || max_duration == 0.0
if i > num_dur
@warn "No duration searched match this period and duration." period duration
return (min_duration > 0.0) ? min_duration/24 : 0.0
end
if period <= max_periods[i] && min_duration == 0.0
min_duration = cdpp_durations[i]
end
if period >= min_periods[num_dur+1-i] && max_duration == 0.0
max_duration = cdpp_durations[num_dur+1-i]
end
i+=1
end
if duration<=max_duration/24 && duration>=min_duration/24
return duration
elseif duration>=max_duration/24
return max_duration/24
elseif duration <=min_duration/24
return min_duration/24
end
end
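# Illustrative usage (assumes cdpp_durations holds the 14 standard Kepler pipeline
# search durations in hours, 1.5 up to 15.0):
# get_durations_searched_Kepler(10.0, 0.25) # within the searched range -> returns 0.25 days
# get_durations_searched_Kepler(10.0, 1.0)  # longer than any searched -> clamped to 15/24 days
# get_durations_searched_Kepler(10.0, 0.01) # shorter than any searched -> clamped to 1.5/24 days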
include("transit_detection_model.jl")
include("transit_prob_geometric.jl")
const has_sc_bit_array_size = 7*8 # WARNING: Must be big enough given value of num_quarters (assumed to be <=17)
mutable struct KeplerTargetObs # QUERY: Do we want to make this type depend on whether the catalog is based on simulated or real data?
obs::Vector{TransitPlanetObs}
sigma::Vector{TransitPlanetObs} # Simplistic approach to uncertainties for now. QUERY: Should estimated uncertainties be part of Observations type?
# phys_id::Vector{Tuple{Int32,Int32}} # So we can lookup the system's properties # Commented out since Not used
prob_detect::SystemDetectionProbsAbstract # QUERY: Specialize type of prob_detect depending on whether for simulated or real data?
has_sc::BitArray{1} # Note: Changed from Array{Bool}. Alternatively, we could try StaticArray{Bool} so fixed size? Do we even need to keep this?
star::StarObs
end
#KeplerTargetObs(n::Integer) = KeplerTargetObs( fill(TransitPlanetObs(),n), fill(TransitPlanetObs(),n), fill(tuple(0,0),n), ObservedSystemDetectionProbsEmpty(), fill(false,num_quarters), StarObs(0.0,0.0) )
KeplerTargetObs(n::Integer) = KeplerTargetObs( fill(TransitPlanetObs(),n), fill(TransitPlanetObs(),n), ObservedSystemDetectionProbsEmpty(), falses(has_sc_bit_array_size), StarObs(0.0,0.0,0) )
num_planets(t::KeplerTargetObs) = length(t.obs)
"""
calc_target_obs_sky_ave(t, sim_param)
Simulate observation of Kepler target (with associated planets) and return observed quantities and detection probabilities of all planets averaging over all viewing angles.
# Arguments:
- `t::KeplerTarget`: Kepler target object
- `sim_param::SimParam`: Simulation parameter object, must contain the following values:
- max_tranets_in_sys: Maximum number of transiting planets per Kepler target
- transit_noise_model: Function to use for simulating noise in the observed properties of the transiting planets
# Returns:
Kepler observable object containing observed properties and detection probabilities for every planet
"""
function calc_target_obs_sky_ave(t::KeplerTarget, sim_param::SimParam)
max_tranets_in_sys = get_int(sim_param,"max_tranets_in_sys")
transit_noise_model = get_function(sim_param,"transit_noise_model")
min_detect_prob_to_be_included = 0.0 # get_real(sim_param,"min_detect_prob_to_be_included")
num_observer_samples = 1 # get_int(sim_param,"num_viewing_geometry_samples")
vetting_efficiency = get_function(sim_param,"vetting_efficiency")
np = num_planets(t)
obs = Array{TransitPlanetObs}(undef,np)
sigma = Array{TransitPlanetObs}(undef,np)
#id = Array{Tuple{Int32,Int32}}(np)
ns = length(t.sys)
sdp_sys = Array{SystemDetectionProbsAbstract}(undef,ns)
i = 1
for (s,sys) in enumerate(t.sys)
pdet = zeros(num_planets(sys))
for (p,planet) in enumerate(sys.planet)
if get(sim_param,"verbose",false)
println("# s=",s, " p=",p," num_sys= ",length(t.sys), " num_pl= ",num_planets(sys) )
end
period = sys.orbit[p].P
duration_central = calc_transit_duration_central(t,s,p)
size_ratio = t.sys[s].planet[p].radius/t.sys[s].star.radius
depth = calc_transit_depth(t,s,p)
ntr = calc_expected_num_transits(t, s, p, sim_param)
# cdpp_central = interpolate_cdpp_to_duration(t, duration_central)
# snr_central = calc_snr_if_transit_cdpp(t, depth, duration_central, cdpp_central, sim_param, num_transit=ntr)
# pdet_ave = calc_ave_prob_detect_if_transit_from_snr_cdpp(t, snr_central, period, duration_central, size_ratio, cdpp_central, sim_param, num_transit=ntr)
kepid = StellarTable.star_table(t.sys[s].star.id, :kepid)
osd_duration_central = get_durations_searched_Kepler(period,duration_central) #tests if durations are included in Kepler's observations for a certain planet period. If not, returns nearest possible duration
osd_central = WindowFunction.interp_OSD_from_table(kepid, period, osd_duration_central)
if osd_duration_central > duration_central #use a correcting factor if this duration is lower than the minimum searched for this period.
osd_central = osd_central*osd_duration_central/duration_central
end
snr_central = calc_snr_if_transit(t, depth, duration_central, osd_central, sim_param, num_transit=ntr)
pdet_ave = calc_ave_prob_detect_if_transit_from_snr(t, snr_central, period, duration_central, size_ratio, osd_central, sim_param, num_transit=ntr)
add_to_catalog = pdet_ave > min_detect_prob_to_be_included # Include all planets with sufficient detection probability
if add_to_catalog
pdet_central = calc_prob_detect_if_transit(t, snr_central, period, duration_central, sim_param, num_transit=ntr)
threshold_pdet_ratio = rand()
hard_max_num_b_tries = 100
max_num_b_tries = min_detect_prob_to_be_included == 0. ? hard_max_num_b_tries : min(hard_max_num_b_tries,convert(Int64,1/min_detect_prob_to_be_included))
# We compute measurement noise based on a single value of b. We draw from a uniform distribution for b and then using rejection sampling to reduce probability of higher impact parameters
pdet_this_b = 0.0
for j in 1:max_num_b_tries
b = rand()
transit_duration_factor = calc_effective_transit_duration_factor_for_impact_parameter_b(b,size_ratio)
duration = duration_central * transit_duration_factor # WARNING: Technically, this duration may be slightly reduced for grazing cases to account for reduction in SNR due to planet not being completely inscribed by star at mid-transit. But this will be a smaller effect than limb-darkening for grazing transits. Also, makes a variant of the small angle approximation
# cdpp = interpolate_cdpp_to_duration(t, duration)
# snr = snr_central * (cdpp_central/cdpp) * sqrt(transit_duration_factor)
osd_duration = get_durations_searched_Kepler(period,duration) #tests if durations are included in Kepler's observations for a certain planet period. If not, returns nearest possible duration
osd = WindowFunction.interp_OSD_from_table(kepid, period, osd_duration)
if osd_duration > duration #use a correcting factor if this duration is lower than the minimum searched for this period.
osd = osd*osd_duration/duration
end
snr = snr_central * (osd_central/osd)
pdet_this_b = calc_prob_detect_if_transit(t, snr, period, duration, sim_param, num_transit=ntr)
pvet = vetting_efficiency(t.sys[s].planet[p].radius, period)
if pdet_this_b >= threshold_pdet_ratio * pdet_central
#println("# Adding pdet_this_b = ", pdet_this_b, " pdet_c = ", pdet_central, " snr= ",snr, " cdpp= ",cdpp, " duration= ",duration, " b=",b, " u01= ", threshold_pdet_ratio)
pdet[p] = pdet_ave*pvet
#####
duration_central = calc_transit_duration_central(t,s,p)
transit_duration_factor = calc_effective_transit_duration_factor_for_impact_parameter_b(b,size_ratio)
duration = duration_central * transit_duration_factor # WARNING: Makes a variant of the small angle approximation
####
obs[i], sigma[i] = transit_noise_model(t, s, p, depth, duration, snr, ntr, b=b)
i += 1
break
end # if add to obs and sigma lists
end # for j
else # add_to_catalog
# Do anything for planets that are extremely unlikely to be detected even if they were to transit?
end
end
resize!(obs,i-1)
resize!(sigma,i-1)
sdp_sys[s] = calc_simulated_system_detection_probs(sys, pdet, max_tranets_in_sys=max_tranets_in_sys, min_detect_prob_to_be_included=min_detect_prob_to_be_included, num_samples=num_observer_samples)
end
# TODO SCI DETAIL: Combine sdp_sys to allow for target to have multiple planetary systems
s1 = findfirst(x->num_planets(x)>0,sdp_sys) # WARNING IMPORTANT: For now just take first system with planets
if s1 === nothing
s1 = 1
end
sdp_target = sdp_sys[s1]
has_no_sc = falses(has_sc_bit_array_size)
star_obs = StarObs( t.sys[1].star.radius, t.sys[1].star.mass, t.sys[1].star.id ) # NOTE: This sets the observed star properties to be those in the stellar catalog. If want to incorporate uncertainty in stellar properties, that would be done elsewhere when translating depths into planet radii.
#return KeplerTargetObs(obs, sigma, id, sdp_target, has_no_sc, star_obs )
return KeplerTargetObs(obs, sigma, sdp_target, has_no_sc, star_obs )
end
"""
calc_target_obs_single_obs(t, sim_param)
Simulate observation of Kepler target (with associated planets) and return observed quantities and detection probabilities of all planets assuming a single viewpoint.
# Arguments:
- `t::KeplerTarget`: Kepler target object
- `sim_param::SimParam`: Simulation parameter object, must contain the following values:
- max_tranets_in_sys: Maximum number of transiting planets per Kepler target
- transit_noise_model: Function to use for simulating noise in the observed properties of the transiting planets
# Returns:
Kepler observable object containing observed properties and detection probabilities for every planet
"""
function calc_target_obs_single_obs(t::KeplerTarget, sim_param::SimParam)
# max_tranets_in_sys = get_int(sim_param,"max_tranets_in_sys")
transit_noise_model = get_function(sim_param,"transit_noise_model")
min_detect_prob_to_be_included = 0.0 # get_real(sim_param,"min_detect_prob_to_be_included")
vetting_efficiency = get_function(sim_param,"vetting_efficiency")
np = num_planets(t)
obs = Array{TransitPlanetObs}(undef,np)
sigma = Array{TransitPlanetObs}(undef,np)
ns = length(t.sys)
sdp_sys = Array{ObservedSystemDetectionProbs}(undef,ns)
i = 1
cuantos = 1000 # counter used when testing the OSD interpolator (currently unused)
for (s,sys) in enumerate(t.sys)
pdet = zeros(num_planets(sys))
for (p,planet) in enumerate(sys.planet)
if get(sim_param,"verbose",false)
println("# s=",s, " p=",p," num_sys= ",length(t.sys), " num_pl= ",num_planets(sys) )
end
duration = calc_transit_duration_eff(t,s,p)
if duration <= 0.
continue
end
period = sys.orbit[p].P
ntr = calc_expected_num_transits(t, s, p, sim_param)
depth = calc_transit_depth(t,s,p)
# Apply correction to snr if grazing transit
size_ratio = t.sys[s].planet[p].radius/t.sys[s].star.radius
b = calc_impact_parameter(t.sys[s],p)
snr_correction = calc_depth_correction_for_grazing_transit(b,size_ratio)
depth *= snr_correction
# cdpp = interpolate_cdpp_to_duration(t, duration)
# snr = calc_snr_if_transit_cdpp(t, depth, duration, cdpp, sim_param, num_transit=ntr)
kepid = StellarTable.star_table(t.sys[s].star.id, :kepid)
osd_duration = get_durations_searched_Kepler(period,duration) #tests if durations are included in Kepler's observations for a certain planet period. If not, returns nearest possible duration
osd = WindowFunction.interp_OSD_from_table(kepid, period, osd_duration)
if osd_duration > duration #use a correcting factor if this duration is lower than the minimum searched for this period.
osd = osd*osd_duration/duration
end
snr = calc_snr_if_transit(t, depth, duration, osd, sim_param, num_transit=ntr)
pdet[p] = calc_prob_detect_if_transit(t, snr, period, duration, sim_param, num_transit=ntr)
if pdet[p] > min_detect_prob_to_be_included
pvet = vetting_efficiency(t.sys[s].planet[p].radius, period)
pdet[p] *= pvet
duration = calc_transit_duration(t,s,p)
obs[i], sigma[i] = transit_noise_model(t, s, p, depth, duration, snr, ntr)
i += 1
end
end
resize!(obs,i-1)
resize!(sigma,i-1)
sdp_sys[s] = ObservedSystemDetectionProbs(pdet)
end
# TODO SCI DETAIL: Combine sdp_sys to allow for target to have multiple planetary systems
s1 = findfirst(x->num_planets(x)>0,sdp_sys) # WARNING: For now just take first system with planets; assumes there are not two stars with planets in one target
if s1 === nothing
s1 = 1
end
sdp_target = sdp_sys[s1]
has_no_sc = falses(has_sc_bit_array_size)
star_obs = StarObs( t.sys[1].star.radius, t.sys[1].star.mass, t.sys[1].star.id ) # NOTE: This just copies star properties directly
return KeplerTargetObs(obs, sigma, sdp_target, has_no_sc, star_obs )
end
function test_transit_observations(sim_param::SimParam; verbose::Bool=false) # TODO TEST: Add more tests
#transit_param = TransitParameter( EphemerisLinear(10.0, 0.0), TransitShape(0.01, 3.0/24.0, 0.5) )
generate_kepler_target = get_function(sim_param,"generate_kepler_target")
max_it = 100000
local obs
for i in 1:max_it
target = generate_kepler_target(sim_param)::KeplerTarget
while num_planets(target) == 0
target = generate_kepler_target(sim_param)::KeplerTarget
end
#calc_transit_prob_single_planet_one_obs(target,1,1)
calc_transit_prob_single_planet_obs_ave(target,1,1)
obs = calc_target_obs_single_obs(target,sim_param)
obs = calc_target_obs_sky_ave(target,sim_param)
if verbose && (num_planets(obs) > 0)
println("# i= ",string(i)," np= ",num_planets(obs), " obs= ", obs )
break
end
end
return obs
end
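# Truncating the standard normal at ±0.999 keeps multiplicative factors of the
# form (1 + sigma*randtn()), used by the noise models below, strictly positive
# whenever sigma <= 1.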
randtn() = rand(truncated(Normal(0.0,1.0),-0.999,0.999))
"""
transit_noise_model_no_noise(t, s, p, depth, duration, snr, num_tr, b = 0.0)
Transit noise model that assumes no additional noise introduced to observations.
# Arguments:
- `t::KeplerTarget`: Kepler target object
- `s::Integer`: Index of star in Kepler target
- `p::Integer`: Index of planet in Kepler target
- `depth::Float64`: (Fractional) transit depth
- `duration::Float64`: Transit duration (in days)
- `snr::Float64`: Expected multiple event statistic (signal-to-noise ratio) of transit
- `num_tr::Float64`: Expected number of transits observed
- `b::Float64 = 0.0`: Impact parameter of planet's orbit
# Returns
- TransitPlanetObs object containing simulated period, t0, transit depth, and transit duration
- TransitPlanetObs object containing simulated uncertainties for period, t0, transit depth, and transit duration
"""
function transit_noise_model_no_noise(t::KeplerTarget, s::Integer, p::Integer, depth::Float64, duration::Float64, snr::Float64, num_tr::Float64; b::Float64 = 0.0)
period = t.sys[s].orbit[p].P
t0 = period*rand() # WARNING: Not being calculated from orbit
sigma_period = 0.0
sigma_t0 = 0.0
sigma_depth = 0.0
sigma_duration = 0.0
sigma = TransitPlanetObs( sigma_period, sigma_t0, sigma_depth, sigma_duration )
obs = TransitPlanetObs( period, t0, depth,duration)
return obs, sigma
end
"""
transit_noise_model_fixed_noise(t, s, p, depth, duration, snr, num_tr, b = 0.0)
Transit noise model that assumes fixed amounts of uncertainty on observed transit properties. Uncertainties are as follows:
- Period = 10^-6
- t0 = 10^-4
- Depth = 10^-1
- Duration = 10^-2
# Arguments:
- `t::KeplerTarget`: Kepler target object
- `s::Integer`: Index of star in Kepler target
- `p::Integer`: Index of planet in Kepler target
- `depth::Float64`: (Fractional) transit depth
- `duration::Float64`: Transit duration (in days)
- `snr::Float64`: Expected multiple event statistic (signal-to-noise ratio) of transit
- `num_tr::Float64`: Expected number of transits observed
- `b::Float64 = 0.0`: Impact parameter of planet's orbit
# Returns
- TransitPlanetObs object containing simulated period, t0, transit depth, and transit duration
- TransitPlanetObs object containing simulated uncertainties for period, t0, transit depth, and transit duration
"""
function transit_noise_model_fixed_noise(t::KeplerTarget, s::Integer, p::Integer, depth::Float64, duration::Float64, snr::Float64, num_tr::Float64; b::Float64 = 0.0)
period = t.sys[s].orbit[p].P
t0 = period*rand() # WARNING: Not being calculated from orbit
sigma_period = 1e-6
sigma_t0 = 1e-4
sigma_depth = 0.1
sigma_duration = 0.01
sigma = TransitPlanetObs( sigma_period, sigma_t0, sigma_depth, sigma_duration)
#obs = TransitPlanetObs( period, t0, depth, duration)
obs = TransitPlanetObs( period*(1.0+sigma.period*randtn()), t0*(1.0+sigma.t0*randtn()), depth*(1.0+sigma.depth*randtn()), duration*(1.0+sigma.duration*randtn()))
return obs, sigma
end
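#= Illustrative usage (not executed), assuming `t` is a simulated KeplerTarget whose first
   system contains at least one planet; the depth and duration values follow the demo
   TransitParameter commented out in test_transit_observations above (depth = 0.01,
   duration = 3 hours expressed in days):
     obs, sigma = transit_noise_model_no_noise(   t, 1, 1, 0.01, 3.0/24.0, 10.0, 20.0)
     obs, sigma = transit_noise_model_fixed_noise(t, 1, 1, 0.01, 3.0/24.0, 10.0, 20.0)
   Each returns one TransitPlanetObs of simulated observables and one of their uncertainties.
=#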
#make_matrix_pos_def_count = 0
function make_matrix_pos_def(A::Union{AbstractArray{T1,2},Symmetric{T1,<:AbstractArray{T1,2}}}; verbose::Bool = false) where {T1<:Real}
@assert size(A,1) == size(A,2)
#global make_matrix_pos_def_count
A = (typeof(A) <: Symmetric) ? A : Symmetric(A)
smallest_eigval = eigvals(A,1:1)[1]
if smallest_eigval > 0.0
return PDMat(A)
else
#make_matrix_pos_def_count += 1
ridge = 1.01 * abs(smallest_eigval)
if verbose
println("# Warning: Adding ridge (",ridge,") to matrix w/ eigenvalue ", smallest_eigval," (#", make_matrix_pos_def_count,").")
end
return PDMat(A + Diagonal(ridge*ones(size(A,1))))
end
end
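#= Illustrative example (not executed): a symmetric matrix with a negative eigenvalue gets
   a diagonal ridge of 1.01*|smallest eigenvalue| before being wrapped in a PDMat:
     A = [1.0 2.0; 2.0 1.0]   # eigenvalues -1 and 3
     make_matrix_pos_def(A)   # returns PDMat(A + Diagonal(1.01*ones(2))), eigenvalues 0.01 and 4.01
=#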
"""
transit_noise_model_diagonal(t, s, p, depth, duration, snr, num_tr; b = calc_impact_parameter(t.sys[s], p))
Transit noise model that uses the Fisher information matrix formulation assuming finite integration from Price & Rogers (2014).
# Arguments:
- `t::KeplerTarget`: Kepler target object
- `s::Integer`: Index of star in Kepler target
- `p::Integer`: Index of planet in Kepler target
- `depth::Float64`: (Fractional) transit depth
- `duration::Float64`: Transit duration (in days)
- `snr::Float64`: Expected multiple event statistic (signal-to-noise ratio) of transit
- `num_tr::Float64`: Expected number of transits observed
- `b::Float64 = calc_impact_parameter(t.sys[s], p)`: Impact parameter of planet's orbit
# Returns
- TransitPlanetObs object containing simulated period, t0, transit depth, and transit duration
- TransitPlanetObs object containing simulated uncertainties for period, t0, transit depth, and transit duration
"""
function transit_noise_model_diagonal(t::KeplerTarget, s::Integer, p::Integer, depth::Float64, duration::Float64, snr::Float64, num_tr::Float64; b::Float64 = calc_impact_parameter(t.sys[s],p) )
transit_noise_model_price_rogers(t, s, p, depth, duration, snr, num_tr; b=b, diagonal=true )
end
function transit_noise_model_price_rogers(t::KeplerTarget, s::Integer, p::Integer, depth::Float64, duration::Float64, snr::Float64, num_tr::Float64; b::Float64 = calc_impact_parameter(t.sys[s],p), diagonal::Bool = false )
period = t.sys[s].orbit[p].P
t0 = period*rand() # WARNING: Not being calculated from orbit
# Use variable names from Price & Rogers
one_minus_e2 = (1-t.sys[s].orbit[p].ecc)*(1+t.sys[s].orbit[p].ecc)
a_semimajor_axis = semimajor_axis(t.sys[s],p)
tau0 = rsol_in_au*t.sys[s].star.radius*period/(a_semimajor_axis*2pi)
tau0 *= sqrt(one_minus_e2)/(1+t.sys[s].orbit[p].ecc*sin(t.sys[s].orbit[p].omega))
r = t.sys[s].planet[p].radius/t.sys[s].star.radius
sqrt_one_minus_b2 = (0.0<=b<1.0) ? sqrt((1-b)*(1+b)) : 0.0
@assert(sqrt_one_minus_b2>=0.0)
if(b<1) # trapezoidal transit shape
T = 2*tau0*sqrt_one_minus_b2
tau = 2*tau0*r/sqrt_one_minus_b2
delta = depth
else # triangular transit shape, TODO: SCI DETAIL: Could improve treatment, but so rare this should be good enough for most purposes not involving EBs
@assert b<=1+r
T = 2*tau0*sqrt((1+r+b)*(1+r-b))
tau = T/2
delta = depth/2
end
Ttot = period
I = LC_integration_time # WARNING: Assumes LC only
Lambda_eff = LC_rate * num_tr # calc_expected_num_transits(t, s, p, sim_param)
sigma = interpolate_cdpp_to_duration(t, duration)
# Price & Rogers Eqn A8 & Table 1 # Thanks to Danley for finding typeos.
tau3 = tau^3
I3 = I^3
a1 = (10*tau3+2*I^3-5*tau*I^2)/tau3
a2 = (5*tau3+I3-5*tau*tau*I)/tau3
a3 = (9*I^5*Ttot-40*tau3*I*I*Ttot+120*tau^4*I*(3*Ttot-2*tau))/tau^6
a4 = (a3*tau^5+I^4*(54*tau-35*Ttot)-12*tau*I3*(4*tau+Ttot)+360*tau^4*(tau-Ttot))/tau^5
a5 = (a2*(24T*T*(I-3*tau)-24*T*Ttot*(I-3*tau))+tau3*a4)/tau3
a6 = (3*tau*tau+T*(I-3*tau))/(tau*tau)
a7 = (-60*tau^4+12*a2*tau3*T-9*I^4+8*tau*I3+40*tau3*I)/(tau^4)
a8 = (2T-Ttot)/tau
a9 = (-3*tau*tau*I*(-10*T*T+10*T*Ttot+I*(2*I+5*Ttot))-I^4*Ttot+8*tau*I3*Ttot)/(tau^5)
a10 = ((a9+60)*tau*tau+10*(-9*T*T+9*T*Ttot+I*(3*I+Ttot))-75*tau*Ttot)/(tau*tau)
a11 = (I*Ttot-3*tau*(Ttot-2*tau))/(tau*tau)
a12 = (-360*tau^5-24*a2*tau3*T*(I-3*tau)+9*I^5-35*tau*I^4-12*tau*tau*I3-40*tau3*I*I+360*tau^4*I)/(tau^5)
a13 = (-3*I3*(8*T*T-8*T*Ttot+3*I*Ttot)+120*tau*tau*T*I*(T-Ttot)+8*tau*I3*Ttot)/tau^5
a14 = (a13*tau*tau+40*(-3*T*T+3*T*Ttot+I*Ttot)-60*tau*Ttot)/(tau*tau)
a15 = (2*I-6*tau)/tau
b1 = (6*I*I-3*I*Ttot+tau*Ttot)/(I*I)
b2 = (tau*T+3*I*(I-T))/(I*I)
b3 = (tau3-12*T*I*I+8*I3+20*tau*I*I-8*tau*tau*I)/I3
b4 = (6*T*T-6*T*Ttot+I*(5*Ttot-4*I))/(I*I)
b5 = (10*I-3*tau)/I
b6 = (12*b4*I3+4*tau*(-6*T*T+6T*Ttot+I*(13*Ttot-30*I)))/I3
b7 = (b6*I^5+4*tau*tau*I*I*(12*I-11*Ttot)+tau3*I*(11*Ttot-6*I)-tau^4*Ttot)/I^5
b8 = (3T*T-3*T*Ttot+I*Ttot)/(I*I)
b9 = (8*b8*I^4+20*tau*I*I*Ttot-8*tau*tau*I*Ttot+tau3*Ttot)/I^4
b10 = (-tau^4+24*T*I*I*(tau-3I)+60*I^4+52*tau*I3-44*tau*tau*I*I+11*tau3*I)/I^4
b11 = (-15*b4*I3+10*b8*tau*I*I+15*tau*tau*(2*I-Ttot))/I3
b12 = (b11*I^5+2*tau3*I*(4*Ttot-3*I)-tau^4*Ttot)/I^5
b13 = (Ttot-2*T)/I
b14 = (6*I-2*tau)/I
Q = snr/sqrt(num_tr)
sigma_t0 = tau>=I ? sqrt(0.5*tau*T/(1-I/(3*tau)))/Q : sqrt(0.5*I*T/(1-tau/(3*I)))/Q
sigma_period = sigma_t0/sqrt(num_tr)
sigma_duration = tau>=I ? sigma*sqrt(abs(6*tau*a14/(delta*delta*a5)) /Lambda_eff ) : sigma*sqrt(abs(6*I*b9/(delta*delta*b7)) / Lambda_eff)
sigma_depth = tau>=I ? sigma*sqrt(abs(-24*a11*a2/(tau*a5)) / Lambda_eff) : sigma*sqrt(abs(24*b1/(I*b7)) / Lambda_eff)
sigma_obs = TransitPlanetObs( sigma_period, sigma_t0, sigma_depth, sigma_duration )
local obs
if diagonal # Assume uncertainties uncorrelated (Diagonal)
obs = TransitPlanetObs( period*(1.0+sigma_obs.period*randtn()), t0*(1.0+sigma_obs.t0*randtn()), depth*(1.0+sigma_obs.depth*randtn()), duration*(1.0+sigma_obs.duration*randtn()))
else # TODO WARNING TEST: Should test before using full covariance matrix
cov = zeros(4,4)
if tau>=I
# cov[0,0] = -3*tau/(delta*delta*a15)
cov[1,1] = 24*tau*a10/(delta*delta*a5)
cov[1,2] = cov[2,1] = 36*a8*tau*a1/(delta*delta*a5)
cov[1,3] = cov[3,1] = -12*a11*a1/(delta*a5)
cov[1,4] = cov[4,1] = -12*a6*a1/(delta*a5)
cov[2,2] = 6*tau*a14/(delta*delta*a5)
cov[2,3] = cov[3,2] = 72*a8*a2/(delta*a5)
cov[2,4] = cov[4,2] = 6*a7/(delta*a5)
cov[3,3] = -24*a11*a2/(tau*a5)
cov[3,4] = cov[4,3] = -24*a6*a2/(tau*a5)
cov[4,4] = a12/(tau*a5)
else
# cov[0,0] = 3*I/(delta*delta*b14)
cov[1,1] = -24*I*I*b12/(delta*delta*tau*b7)
cov[1,2] = cov[2,1] = 36*I*b13*b5/(delta*delta*b7)
cov[1,3] = cov[3,1] = 12*b5*b1/(delta*b7)
cov[1,4] = cov[4,1] = 12*b5*b2/(delta*b7)
cov[2,2] = 6*I*b9/(delta*delta*b7)
cov[2,3] = cov[3,2] = 72*b13/(delta*b7)
cov[2,4] = cov[4,2] = 6*b3/(delta*b7)
cov[3,3] = 24*b1/(I*b7)
cov[3,4] = cov[4,3] = 24*b2/(I*b7)
cov[4,4] = b10/(I*b7)
end
cov .*= sigma*sigma/Lambda_eff
cov = make_matrix_pos_def(cov)
obs_dist = MvNormal(zeros(4),cov)
local obs_period, obs_duration, obs_depth, sigma_period, sigma_duration, sigma_depth
isvalid = false
while !isvalid
obs_vec = rand(obs_dist)
obs_duration = duration + obs_vec[2]
obs_depth = depth + obs_vec[3]
if (obs_duration>0.0) && (obs_depth>0.0)
isvalid = true
end
end # while
obs = TransitPlanetObs( period*(1.0+sigma_obs.period*randn()), t0*(1.0+sigma_obs.t0*randn()), obs_depth, obs_duration)
end
return obs, sigma_obs
end
## ExoplanetsSysSim/src/transit_prob_geometric.jl
## (c) 2015 Eric B. Ford
# Transit probability expressions if treat planets separately
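# For a circular orbit the geometric transit probability is simply Rstar/a; averaging an
# eccentric orbit over the argument of periapse gives Rstar/(a*(1-e^2)), as implemented below.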
function calc_transit_prob_single_planet_approx(P::Float64, Rstar::Float64, Mstar::Float64)
return min(rsol_in_au*Rstar/semimajor_axis(P,Mstar), 1.0)
end
#function calc_transit_prob_single_planet_obs_ave(ps::PlanetarySystemAbstract, pl::Integer)
function calc_transit_prob_single_planet_obs_ave(ps::PlanetarySystem{StarT}, pl::Integer) where {StarT<:StarAbstract}
ecc::Float64 = ps.orbit[pl].ecc
a::Float64 = semimajor_axis(ps,pl)
Rstar::Float64 = rsol_in_au*ps.star.radius
return min(Rstar/(a*(1-ecc)*(1+ecc)), 1.0)
end
calc_transit_prob_single_planet_obs_ave(t::KeplerTarget, s::Integer, p::Integer) = calc_transit_prob_single_planet_obs_ave(t.sys[s], p)
#=
# WARNING: This knows about e and w, but still returns a fraction rather than a 0 or 1. Commented out for now, so no one uses it accidentally until we figure out why it was this way
function calc_transit_prob_single_planet_one_obs(ps::PlanetarySystemAbstract, pl::Integer)
ecc::Float64 = ps.orbit[pl].ecc
a::Float64 = semimajor_axis(ps,pl)
Rstar::Float64 = rsol_in_au*ps.star.radius
return min(Rstar*(1+ecc*sin(ps.orbit[pl].omega))/(a*(1-ecc)*(1+ecc)), 1.0)
end
calc_transit_prob_single_planet_one_obs(t::KeplerTarget, s::Integer, p::Integer) = calc_transit_prob_single_planet_one_obs(t.sys[s], p)
=#
# WARNING: Assumes that planets with b>1 won't be detected/pass vetting
#function does_planet_transit(ps::PlanetarySystemAbstract, pl::Integer)
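# A planet transits for a given observer iff its impact parameter satisfies b <= 1, i.e.
# Rstar >= a*|cos(i)|*(1-e^2)/(1+e*sin(omega)); compare calc_impact_parameter below.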
function does_planet_transit(ps::PlanetarySystem{StarT}, pl::Integer) where {StarT<:StarAbstract}
ecc::Float64 = ps.orbit[pl].ecc
incl::Float64 = ps.orbit[pl].incl
a::Float64 = semimajor_axis(ps,pl)
Rstar::Float64 = rsol_in_au*ps.star.radius
if (Rstar >= (a*(1-ecc)*(1+ecc)*abs(cos(incl)))/(1+ecc*sin(ps.orbit[pl].omega)))
return true
else
return false
end
end
#function corbits_placeholder_obs_ave( ps::PlanetarySystemSingleStar, use_pl::Vector{Cint} ) # Might be useful for someone to test w/o CORBITS
function corbits_placeholder_obs_ave( ps::PlanetarySystem{StarT}, use_pl::Vector{Cint} ) where {StarT<:StarAbstract} # Might be useful for someone to test w/o CORBITS
n = num_planets(ps)
prob = 1.0
for p in 1:n
ptr = calc_transit_prob_single_planet_obs_ave(ps,p)
prob *= (use_pl[p]==1) ? ptr : 1.0-ptr
#=
if(use_pl[p]==1)
prob *= calc_transit_prob_single_planet_obs_ave(ps,p)
else
prob *= 1.0-calc_transit_prob_single_planet_obs_ave(ps,p)
end
=#
end
return prob
end
#function calc_impact_parameter(ps::PlanetarySystemSingleStar, pl::Integer)
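# Dimensionless sky-projected star-planet separation at mid-transit, in units of the stellar
# radius: b = (a/Rstar)*|cos(i)|*(1-e^2)/(1+e*sin(omega)); b <= 1 corresponds to a transit.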
function calc_impact_parameter(ps::PlanetarySystem{StarT}, pl::Integer) where {StarT<:StarAbstract}
one_minus_e2 = (1-ps.orbit[pl].ecc)*(1+ps.orbit[pl].ecc)
a_semimajor_axis = semimajor_axis(ps,pl)
b = a_semimajor_axis *cos(ps.orbit[pl].incl)/(ps.star.radius*rsol_in_au)
b *= one_minus_e2/(1+ps.orbit[pl].ecc*sin(ps.orbit[pl].omega))
b = abs(b)
end
#function prob_combo_transits_one_obs( ps::PlanetarySystemSingleStar, use_pl::Vector{Cint} )
function prob_combo_transits_one_obs( ps::PlanetarySystem{StarT}, use_pl::Vector{Cint} ) where {StarT<:StarAbstract}
n = num_planets(ps)
for p in 1:n
#one_minus_e2 = (1-ps.orbit[p].ecc)*(1+ps.orbit[p].ecc)
#a_semimajor_axis = semimajor_axis(ps,p)
#b = a_semimajor_axis *cos(ps.orbit[p].incl)/ps.star.radius
#b *= one_minus_e2/(1+ps.orbit[p].ecc*sin(ps.orbit[p].omega))
b = calc_impact_parameter(ps, p)
if ! ( (b<=1.0 && use_pl[p]==1) || (b> 1.0 && use_pl[p]!=1) )
return 0.0
end
end
return 1.0
end
struct prob_combo_transits_obs_ave_workspace_type
a::Vector{Cdouble}
r::Vector{Cdouble}
ecc::Vector{Cdouble}
Omega::Vector{Cdouble}
omega::Vector{Cdouble}
inc::Vector{Cdouble}
function prob_combo_transits_obs_ave_workspace_type(n::Integer)
@assert(1<=n<=100)
new( Array{Cdouble}(undef,n), Array{Cdouble}(undef,n), Array{Cdouble}(undef,n), Array{Cdouble}(undef,n), Array{Cdouble}(undef,n), Array{Cdouble}(undef,n) )
end
end
#=
# Attempt to reduce memory allocations. It does that, but with no noticeable speed improvement, so I'm leaving it commented out for now.
const global corbits_max_num_planets_per_system = 20
prob_combo_transits_obs_ave_workspace = prob_combo_transits_obs_ave_workspace_type(corbits_max_num_planets_per_system)
#function prob_combo_transits_obs_ave( ps::PlanetarySystemSingleStar, use_pl::Vector{Cint}; print_orbit::Bool = false)
function prob_combo_transits_obs_ave( ps::PlanetarySystem{StarT}, use_pl::Vector{Cint}; print_orbit::Bool = false) where {StarT<:StarAbstract}
n = num_planets(ps)
@assert(n<=corbits_max_num_planets_per_system)
for i in 1:n
prob_combo_transits_obs_ave_workspace.a[i] = semimajor_axis(ps,i)
prob_combo_transits_obs_ave_workspace.r[i] = ps.planet[i].radius * rsol_in_au
prob_combo_transits_obs_ave_workspace.ecc[i] = ps.orbit[i].ecc
prob_combo_transits_obs_ave_workspace.Omega[i] = ps.orbit[i].asc_node
prob_combo_transits_obs_ave_workspace.omega[i] =ps.orbit[i].omega
prob_combo_transits_obs_ave_workspace.inc[i] = ps.orbit[i].incl
end
r_star = convert(Cdouble,ps.star.radius * rsol_in_au )
prob = prob_of_transits_approx(prob_combo_transits_obs_ave_workspace.a, r_star, prob_combo_transits_obs_ave_workspace.r, prob_combo_transits_obs_ave_workspace.ecc, prob_combo_transits_obs_ave_workspace.Omega, prob_combo_transits_obs_ave_workspace.omega, prob_combo_transits_obs_ave_workspace.inc, use_pl)
#prob = prob_of_transits_approx(a, r_star, r, ecc, Omega, omega, inc, use_pl)
if print_orbit
println("# a = ", prob_combo_transits_obs_ave_workspace.a)
println("# r_star = ", r_star)
println("# r = ", prob_combo_transits_obs_ave_workspace.r)
println("# ecc = ", prob_combo_transits_obs_ave_workspace.ecc)
println("# Omega = ", prob_combo_transits_obs_ave_workspace.Omega)
println("# omega = ", prob_combo_transits_obs_ave_workspace.omega)
println("# inc = ", prob_combo_transits_obs_ave_workspace.inc)
println("# use_pl = ", use_pl)
println("")
end
return prob
end
=#
#function prob_combo_transits_obs_ave( ps::PlanetarySystemSingleStar, use_pl::Vector{Cint}; print_orbit::Bool = false)
function prob_combo_transits_obs_ave( ps::PlanetarySystem{StarT}, use_pl::Vector{Cint}; print_orbit::Bool = false) where {StarT<:StarAbstract}
n = num_planets(ps)
a = Cdouble[ semimajor_axis(ps,i) for i in 1:n ]
r_star = convert(Cdouble,ps.star.radius * rsol_in_au )
r = Cdouble[ ps.planet[i].radius * rsol_in_au for i in 1:n ]
ecc = Cdouble[ ps.orbit[i].ecc for i in 1:n ]
Omega = Cdouble[ ps.orbit[i].asc_node for i in 1:n ]
omega = Cdouble[ ps.orbit[i].omega for i in 1:n ]
inc = Cdouble[ ps.orbit[i].incl for i in 1:n ]
#use_pl = Cint[0 for i in 1:n]
#for i in 1:length(combo)
# use_pl[i] = 1
#end
prob = prob_of_transits_approx(a, r_star, r, ecc, Omega, omega, inc, use_pl)
if print_orbit
println("# a = ", a)
println("# r_star = ", r_star)
println("# r = ", r)
println("# ecc = ", ecc)
println("# Omega = ", Omega)
println("# omega = ", omega)
println("# inc = ", inc)
println("# use_pl = ", use_pl)
println("")
end
return prob
end
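#= Illustrative usage (not executed): for a 3-planet system `ps`, the sky-averaged
   probability that planets 1 and 3 transit while planet 2 does not is
     prob_combo_transits_obs_ave(ps, Cint[1, 0, 1])
   with the joint geometry evaluated by CORBITS' prob_of_transits_approx. =#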
@compat abstract type SystemDetectionProbsAbstract end
@compat abstract type SystemDetectionProbsTrait end
@compat abstract type SkyAveraged <: SystemDetectionProbsTrait end
@compat abstract type OneObserver <: SystemDetectionProbsTrait end
# Derived types will allow us to specialize depending on whether using sky-averaged values, values for actual geometry (both of which require the physical catalog), or estimates based on observed data
mutable struct SimulatedSystemDetectionProbs{T<:SystemDetectionProbsTrait} <: SystemDetectionProbsAbstract # To be used for simulated systems where we can calculate everything
# Inputs to CORBITS
detect_planet_if_transits::Vector{Float64} # Probability of detecting each planet, averaged over all observers for each planet individually, assumes b~U[0,1); To be used in pass 1
# Outputs from CORBITS
pairwise::Matrix{Float64} # detection probability (incl geometry & detection probability) for each planet (diagonal) and each planet pair (off diagonal)
# TODO OPT: Make matrix symmetric to save memory?
n_planets::Vector{Float64} # fraction of time would detect n planets (incl. geometry & detection probability)
combo_detected::Vector{Vector{Int64}} # List of combinations of planets drawn from full joint multi-transit probability
end
# Removed since not really needed and the previous version had used aliased typenames and functions
# Specialize so know whether the values are sky averaged or not
#typealias SkyAveragedSystemDetectionProbs SimulatedSystemDetectionProbs{SkyAveraged}
#typealias OneObserverSystemDetectionProbs SimulatedSystemDetectionProbs{OneObserver}
#SkyAveragedSystemDetectionProbs = SimulatedSystemDetectionProbs{SkyAveraged}
#OneObserverSystemDetectionProbs = SimulatedSystemDetectionProbs{OneObserver}
function SimulatedSystemDetectionProbs(traits::Type, p::Vector{Float64}; num_samples::Integer = 1)
SimulatedSystemDetectionProbs{traits}( p, zeros(length(p),length(p)), zeros(length(p)), fill(Array{Int64}(undef,0), num_samples) )
end
function SimulatedSystemDetectionProbs(traits::Type, n::Integer; num_samples::Integer = 1)
SimulatedSystemDetectionProbs{traits}( ones(n), zeros(n,n), zeros(n), fill(Array{Int64}(undef,0), num_samples) )
end
SkyAveragedSystemDetectionProbs(p::Vector{Float64}; num_samples::Integer = 1) = SimulatedSystemDetectionProbs( SkyAveraged, p, num_samples=num_samples)
SkyAveragedSystemDetectionProbs(n::Integer; num_samples::Integer = 1) = SimulatedSystemDetectionProbs(SkyAveraged, n, num_samples=num_samples)
SkyAveragedSystemDetectionProbsEmpty() = SimulatedSystemDetectionProbs(SkyAveraged,0)
OneObserverSystemDetectionProbs(p::Vector{Float64}; num_samples::Integer = 1) = SimulatedSystemDetectionProbs( OneObserver, p, num_samples=num_samples)
OneObserverSystemDetectionProbs(n::Integer; num_samples::Integer = 1) = SimulatedSystemDetectionProbs(OneObserver, n, num_samples=num_samples)
OneObserverSystemDetectionProbsEmpty() = SimulatedSystemDetectionProbs(OneObserver,0)
# Functions common to various types of SystemDetectionProbs
num_planets(prob::SimulatedSystemDetectionProbs{T}) where T<:SystemDetectionProbsTrait = length(prob.detect_planet_if_transits)
function prob_detect_if_transits(prob::SimulatedSystemDetectionProbs{T}, pl_id::Integer) where T<:SystemDetectionProbsTrait
@assert 1<=pl_id<=length(prob.detect_planet_if_transits)
prob.detect_planet_if_transits[pl_id]
end
function prob_detect(prob::SimulatedSystemDetectionProbs{T}, pl_id::Integer) where T<:SystemDetectionProbsTrait
if ! (1<=pl_id<=size(prob.pairwise,1) )
println("#ERROR: pl_id =", pl_id, " prob.pairwise= ", prob.pairwise)
end
@assert 1<=pl_id<=size(prob.pairwise,1)
return prob.pairwise[pl_id,pl_id]
end
function prob_detect_both_planets(prob::SimulatedSystemDetectionProbs{T}, pl_id::Integer, ql_id::Integer) where T<:SystemDetectionProbsTrait
@assert 1<=pl_id<=size(prob.pairwise,1)
@assert 1<=ql_id<=size(prob.pairwise,1)
prob.pairwise[pl_id,ql_id]
end
prob_detect_n_planets(prob::SimulatedSystemDetectionProbs{T}, n::Integer) where T<:SystemDetectionProbsTrait = 1<=n<=length(prob.n_planets) ? prob.n_planets[n] : 0.0
# Compute sky-averaged transit probabilities from a planetary system with known physical properties, assuming a single host star
#function calc_simulated_system_detection_probs(ps::PlanetarySystemSingleStar, prob_det_if_tr::Vector{Float64}; num_samples::Integer = 1, max_tranets_in_sys::Integer = 10, min_detect_prob_to_be_included::Float64 = 0.0, observer_trait::Type=SkyAveraged)
function calc_simulated_system_detection_probs(ps::PlanetarySystem{StarT}, prob_det_if_tr::Vector{Float64}; num_samples::Integer = 1, max_tranets_in_sys::Integer = 10, min_detect_prob_to_be_included::Float64 = 0.0, observer_trait::Type=SkyAveraged) where {StarT<:StarAbstract}
@assert observer_trait <: SystemDetectionProbsTrait
@assert num_planets(ps) == length(prob_det_if_tr)
idx_detectable = findall(x->x>0.0,prob_det_if_tr)
n = length(idx_detectable)
@assert n <= max_tranets_in_sys # Make sure memory to store these
#if n==0
# println("# WARNING found no detectable planets based on ",prob_det_if_tr)
#end
invalid_prob_flag = false
#ps_detectable = PlanetarySystemSingleStar(ps,idx_detectable)
#ps_detectable = PlanetarySystem(ps,idx_detectable)
ps_detectable = PlanetarySystem(ps.star,ps.planet[idx_detectable],ps.orbit[idx_detectable])
combo_sample_probs = rand(num_samples)
combo_cum_probs = zeros(num_samples)
#sdp = SimulatedSystemDetectionProbs{observer_trait}(prob_det_if_tr[idx_detectable])
sdp = SimulatedSystemDetectionProbs(observer_trait,prob_det_if_tr[idx_detectable])
planet_should_transit = zeros(Cint,n)
for p in 1:num_planets(sdp)
sdp.pairwise[p,p] = 0.0
end
for ntr in 1:min(n,max_tranets_in_sys) # Loop over number of planets transiting
sdp.n_planets[ntr] = 0.0
for combo in combinations(1:n,ntr) # Loop over specific combinations of detectable planets
prob_det_this_combo = 1.0
for p in combo # Loop over each planet in this combination of detectable planets
prob_det_this_combo *= prob_det_if_tr[idx_detectable[p]]
end
if prob_det_this_combo < min_detect_prob_to_be_included
continue
end
fill!(planet_should_transit,zero(Cint))
for i in 1:length(combo)
planet_should_transit[combo[i]] = one(Cint)
end
local geo_factor::Float64
if observer_trait == SkyAveraged
geo_factor = prob_combo_transits_obs_ave(ps_detectable,planet_should_transit)
elseif observer_trait == OneObserver
geo_factor = prob_combo_transits_one_obs(ps_detectable,planet_should_transit)
else
error(string("typeof(",observer_trait,") is not a valid trait."))
end
prob_det_this_combo *= geo_factor
# Store samples of combinations of planets detected drawn from the full joint multi-planet density
for i in 1:num_samples
if combo_cum_probs[i] < combo_sample_probs[i] <= combo_cum_probs[i]+prob_det_this_combo
sdp.combo_detected[i] = combo
end
end
combo_cum_probs .+= prob_det_this_combo
sdp.n_planets[ntr] += prob_det_this_combo # Accumulate the probability of detecting any n planets
for p in combo # Accumulate the probability of detecting each planet individually
sdp.pairwise[p,p] += prob_det_this_combo
end
#=
for pq in combinations(combo,2) # Accumulate the probability of detecting each planet pair # TODO: OPT: replace with simply calculating integers for pairs to avoid allocations of small arrays
sdp.pairwise[pq[1],pq[2]] = prob_det_this_combo
sdp.pairwise[pq[2],pq[1]] = prob_det_this_combo # TODO OPT: Remove if use symmetric matrix type.
end
=#
if length(combo)>=2
for pi in 2:length(combo) # Accumulate the probability of detecting each planet pair # TODO: OPT: replace with simply calculating integers for pairs to avoid allocations of small arrays
p = combo[pi]
for qi in 1:(pi-1)
q = combo[qi]
sdp.pairwise[p,q] = prob_det_this_combo
sdp.pairwise[q,p] = prob_det_this_combo # TODO OPT: Remove if use symmetric matrix type.
end # qi
end # pi
end # if
end # combo
end # ntr
for p in 1:n
if sdp.pairwise[p,p] > 1.0
invalid_prob_flag = true
println(string("Error! Invalid prob for planet ",p,": ", sdp.pairwise[p,p]))
end
end
if invalid_prob_flag
println("")
for ntr in 1:min(n,max_tranets_in_sys)
for combo in combinations(1:n,ntr)
fill!(planet_should_transit,zero(Cint))
for i in 1:length(combo)
planet_should_transit[combo[i]] = one(Cint)
end
if length(combo) == n
geo_factor = prob_combo_transits_obs_ave(ps_detectable,planet_should_transit, print_orbit = true)
else
geo_factor = prob_combo_transits_obs_ave(ps_detectable,planet_should_transit)
end
println(string("Geo. factor of ",combo," = ",geo_factor))
end
end
println(string("Det. prob. = ", prob_det_if_tr[idx_detectable]))
println("")
#quit()
end
return sdp
end
if false # WARNING: Complicated and untested
function combine_system_detection_probs(prob::Vector{SimulatedSystemDetectionProbs{T}}, s1::Integer, s2::Integer) where T # WARNING: Complicated and untested
npl_s1 = min(num_planets(prob[s1]), max_tranets_in_sys)
npl_s2 = min(num_planets(prob[s2]), max_tranets_in_sys)
num_planets_across_systems = npl_s1 + npl_s2
prob_merged = SimulatedSystemDetectionProbs{T}(num_planets_across_systems)
# Copy probabilities for detecting planets and planet pairs within one system
prob_merged.detect_planet_if_transits[1:npl_s1] = prob[s1].detect_planet_if_transits
prob_merged.pairwise[1:npl_s1,1:npl_s1] = prob[s1].pairwise
offset = npl_s1
prob_merged.detect_planet_if_transits[offset+1:offset+npl_s2] = prob[s2].detect_planet_if_transits
prob_merged.pairwise[offset+1:offset+npl_s2,offset+1:offset+npl_s2] = prob[s2].pairwise
# Calculate probabilities of detecting pairs of planets in different systems, assuming uncorrelated orientations
for p1 in 1:npl_s1
for p2 in 1:npl_s2
prob_detect_both = prob_detect(prob[s1],p1)*prob_detect(prob[s2],p2)
idx1 = p1
idx2 = offset+p2
prob_merged.pairwise[idx1,idx2] = prob_detect_both
prob_merged.pairwise[idx2,idx1] = prob_detect_both
end
end
# Merge probabilities of detecting n_planets, assuming uncorrelated orientations
p_zero_pl_s1 = 1.0-sum(prob[s1].n_planets)
p_zero_pl_s2 = 1.0-sum(prob[s2].n_planets)
for n in 1:min(num_planets_across_systems,max_tranets_in_sys)
prob_merged.n_planets[n] = p_zero_pl_s1*prob[s2].n_planets[n] + p_zero_pl_s2*prob[s1].n_planets[n]
for i in 1:n-1
prob_merged.n_planets[n] += prob[s1].n_planets[i]*prob[s2].n_planets[n-i] + prob[s1].n_planets[n-i]*prob[s2].n_planets[i]
end
end
# Combine samples of detected planet combinations
prob_merged.combo_detected = fill(Array{Int64}(undef,0), min(length(prob[s1].combo_detected), length(prob[s2].combo_detected) ) )
for i in 1:length(prob_merged.combo_detected)
prob_merged.combo_detected[i] = vcat( prob[s1].combo_detected[i], prob[s2].combo_detected[i] .+ offset )
end
return prob_merged
end
function select_subset(prob::SimulatedSystemDetectionProbs{T}, idx::Vector{Int64}) where {T<:SystemDetectionProbsTrait} # WARNING: Complicated and untested
n = length(idx)
subset = SimulatedSystemDetectionProbs{T}(n)
subset.detect_planet_if_transits = prob.detect_planet_if_transits[idx]
subset.pairwise = ones(n,n)
subset.n_planets = zeros(max(n,length(prob.n_planets)))
if 1<=n<=length(subset.n_planets)
subset.n_planets[n] = 1.0
end
subset.combo_detected = Array{Int64,1}[collect(1:n)]
return subset
end
# ASSUMING: Planetary systems for same target are uncorrelated
# Compute sky-averaged transit probabilities from a target with known physical properties
function calc_simulated_system_detection_probs(t::KeplerTarget, sim_param::SimParam ) # WARNING: Complicated and untested
max_tranets_in_sys = get_int(sim_param,"max_tranets_in_sys",10)
min_detect_prob_to_be_included = get(sim_param,"min_detect_prob_to_be_included", 0.0)
s1 = findfirst(s->num_planets(s)>0, t.sys)
if num_planets(t) == num_planets(t.sys[s1])
# Target has only one system with planets
prob_det_if_tr = Float64[calc_ave_prob_detect_if_transit(t, s1, p, sim_param) for p in 1:num_planets(t.sys[s1])]
return calc_simulated_system_detection_probs(t.sys[s1], prob_det_if_tr, max_tranets_in_sys=max_tranets_in_sys, min_detect_prob_to_be_included=min_detect_prob_to_be_included )
else
# Target has multiple systems with planets
sdp = SimulatedSystemDetectionProbs{SkyAveraged}[ SkyAveragedSystemDetectionProbs( min(num_planets(t.sys[s]),max_tranets_in_sys) ) for s in 1:length(t.sys) ]
num_planets_across_systems = 0
# Calculate detection probabilities for each system separately
for s in 1:length(t.sys)
prob_det_if_tr = Float64[calc_ave_prob_detect_if_transit(t, s, p, sim_param) for p in 1:num_planets(t.sys[s])]
sdp[s] = calc_simulated_system_detection_probs(t.sys[s], prob_det_if_tr, max_tranets_in_sys=max_tranets_in_sys, min_detect_prob_to_be_included=min_detect_prob_to_be_included )
num_planets_across_systems += num_planets(t.sys[s])
end
@assert num_planets_across_systems <= max_tranets_in_sys # Make sure memory to store these # QUERY: DETAIL: Should we relax?
# Find system ids for first two systems with planets
#s1, s2 = find_system_detection_probs_with_planets(::Vector{})
more_than_two_systems_with_planets = false
s1 = 0
s2 = 0
for s in 1:length(t.sys)
if num_planets(t.sys[s])>=1
if s1==0
s1 = s
elseif s2==0
s2 = s
else
more_than_two_systems_with_planets = true
end
end
end
@assert (s1!=0) && (s2!=0)
@assert !more_than_two_systems_with_planets
sdp_merged = combine_system_detection_probs(sdp,s1,s2) # Merge SystemDetectionProbs across systems with common target
return sdp_merged
end
end
end
mutable struct ObservedSystemDetectionProbs <: SystemDetectionProbsAbstract # TODO OPT: For observed systems (or simulations of observed systems) where we can't know everything. Is this even used? Or should we just compute these on the fly, rather than storing them? Do we even want to keep this?
planet_transits::Vector{Float64} # Probability that each planet transits individually for one observer based on actual i, e, and omega
detect_planet_if_transits::Vector{Float64} # Probability of detecting each planet given that it transits. Assumes one observer based on actual i, e and omega
# snr::Vector{Float64} # Dimensionless SNR of detection for each planet QUERY: Should we store this here?
end
ObservedSystemDetectionProbs(p::Vector{Float64}) = ObservedSystemDetectionProbs( ones(length(p)), p )
ObservedSystemDetectionProbs(n::Integer) = ObservedSystemDetectionProbs( ones(n), zeros(n) )
ObservedSystemDetectionProbsEmpty() = ObservedSystemDetectionProbs(0)
# Functions common to various types of SystemDetectionProbs
num_planets(prob::ObservedSystemDetectionProbs) = length(prob.detect_planet_if_transits)
prob_detect_if_transits(prob::ObservedSystemDetectionProbs, pl_id::Integer) = prob.detect_planet_if_transits[pl_id]
prob_detect(prob::ObservedSystemDetectionProbs, pl_id::Integer) = prob.planet_transits[pl_id]*prob.detect_planet_if_transits[pl_id]
#prob_detect_both_planets(prob::ObservedSystemDetectionProbs, pl_id::Integer, ql_id::Integer) = prob_detect(prob,pl_id) * prob_detect(prob,ql_id) # WARNING: Assumes independent. Intent is for testing CORBITS. Or should we delete?
#if false
function prob_detect_n_planets(prob::ObservedSystemDetectionProbs, n::Integer) # WARNING: Assumes independent. Intent is for testing CORBITS. May want to comment out to prevent accidental use.
if n<1 || n > num_planets(prob) return 0.0 end
sum_prob = 0.0
for combo in combinations(1:num_planets(prob), n)
prob_this_combo = 1.0
for pl_id in combo
prob_this_combo *= prob_detect(prob,pl_id)
end
sum_prob += prob_this_combo
end
return sum_prob
end
#=
# Compute transit probabilities for a single observer from a target with known physical properties
function calc_observed_system_detection_probs(targ::KeplerTarget, sim_param::SimParam)
n = num_planets(targ)
pdet = zeros(n)
ptr = zeros(n)
pl = 1
for s in 1:length(targ.sys)
for p in 1:length(targ.sys[s].planet)
pdet[pl] = calc_prob_detect_if_transit_with_actual_b(targ, s, p, sim_param)
ptr[pl] = calc_transit_prob_single_planet_one_obs(targ, s, p)
pl += 1
end
end
ObservedSystemDetectionProbs( ptr, pdet )
end
=#
# Estimate transit probabilities for a single observer from a target's observed properties.
if false # Do we actually want this for anything?
function calc_observed_system_detection_probs(kto::KeplerTargetObs, sim_param::SimParam)
n = num_planets(kto)
pdet = ones(n) # WARNING: We assume all observed objects were detected and we don't have enough info to calculate a detection probability. Do we want to do something different?
ptr = zeros(n)
for pl in 1:n
ptr[pl] = calc_transit_prob_single_planet_approx(kto.obs[pl].period, kto.star.radius, kto.star.mass )
end
ObservedSystemDetectionProbs( ptr, pdet )
end
end
## ExoplanetsSysSim/src/window_function.jl
## (c) 2018 Darin Ragozzine
# Gather and prepare the window function data
module WindowFunction
import ..cdpp_durations
export setup_window_function, get_window_function_data, get_window_function_id, eval_window_function
#using DataArrays
using DataFrames
#using CSV
#using JLD2
using FileIO
using ExoplanetsSysSim
using ExoplanetsSysSim.SimulationParameters
# Object to hold window function data
struct window_function_data
window_func_array::Array{Float64,3} # Value of window function (window_function_id, duration_id, period_id). Maybe rename to wf_value or data?
wf_durations_in_hrs::Array{Float64,1} # Boundaries for duration bins in window_func_array. Maybe rename to durations?
wf_periods_in_days::Array{Float64,1} # Boundaries for periods bins in window_func_array. Maybe rename to periods?
sorted_quarter_strings::Array{Int64,1} # TODO OPT: Is there a reason to keep this? Maybe rename to quarter_strings?
allsortedkepids::Array{Int64,1} # value is Kepler ID. Index is same as index to window_function_id_arr
window_function_id_arr::Array{Int64,1} # value is index for window_func_array. Index is same as index to allsortedkepids
default_wf_id::Int64 # Index corresponding to the default window function
already_warned::Array{Bool,1} # Whether we've already thrown a warning about this kepid
end
function window_function_data()
window_function_data( Array{Float64,3}(undef,0,0,0), Array{Float64,1}(undef,0),Array{Float64,1}(undef,0), Array{Int64,1}(undef,0),Array{Int64,1}(undef,0),Array{Int64,1}(undef,0), 0, falses(0) )
end
win_func_data = window_function_data()
function setup(sim_param::SimParam; force_reread::Bool = false)
global win_func_data
if haskey(sim_param,"read_window_function") && !force_reread
return win_func_data
end
window_function_filename = convert(String,joinpath(dirname(pathof(ExoplanetsSysSim)),
"..", "data", convert(String,get(sim_param,"window_function","DR25topwinfuncs.jld2")) ) )
setup(window_function_filename)
add_param_fixed(sim_param,"read_window_function",true)
@assert( size(win_func_data.window_func_array,2) == length(win_func_data.wf_durations_in_hrs) )
@assert( size(win_func_data.window_func_array,3) == length(win_func_data.wf_periods_in_days) )
@assert( size(win_func_data.window_func_array,1) >= maximum(win_func_data.window_function_id_arr) )
@assert( size(win_func_data.window_func_array,1) >= win_func_data.default_wf_id )
return win_func_data
end
function setup(filename::String)
# Reads in the window function data collected from the Kepler Completeness Products
# see Darin Ragozzine's get/cleanDR25winfuncs.jl
if occursin(r".jld2$",filename)
try
wfdata = load(filename)
window_func_array = wfdata["window_func_array"]
wf_durations_in_hrs = wfdata["wf_durations_in_hrs"] # TODO OPT DETAIL: Should we convert units to days here?
wf_periods_in_days = wfdata["wf_periods_in_days"]
sorted_quarter_strings = wfdata["sorted_quarter_strings"]
allsortedkepids = wfdata["allsortedkepids"]
window_function_id_arr = wfdata["window_function_id_arr"]
already_warned = falses(length(allsortedkepids))
global win_func_data = window_function_data(window_func_array, wf_durations_in_hrs, wf_periods_in_days, sorted_quarter_strings,
allsortedkepids, window_function_id_arr, maximum(window_function_id_arr), already_warned )
catch
error(string("# Failed to read window function data > ", filename," < in jld2 format."))
end
end
return win_func_data
end
setup_window_function(sim_param::SimParam; force_reread::Bool = false) = setup(sim_param, force_reread=force_reread)
setup_window_function(filename::String; force_reread::Bool = false) = setup(filename, force_reread=force_reread)
function get_window_function_data()::window_function_data
#global win_func_data
return win_func_data
end
function get_window_function_id(kepid::Int64; use_default_for_unknown::Bool = true)::Int64
# takes the quarter string from the stellar catalog and determines the window function id
# from DR25topwinfuncs.jld2 made by Darin Ragozzine's cleanDR25winfuncs.jl script.
no_win_func_available::Int64 = -1 # hardcoding this in, should match convention in window function input file
idx = searchsortedfirst(win_func_data.allsortedkepids,kepid) # all Kepler kepids are in allsortedkepids
wf_id = win_func_data.window_function_id_arr[idx]
if wf_id == no_win_func_available && use_default_for_unknown
# if a target is observed for less than 4 quarters, then it won't have a corresponding
# window function in this list, so throw a warning and use the last window_function_id
# which corresponds to an "averaged" window function
if !win_func_data.already_warned[idx]
win_func_data.already_warned[idx] = true
if sum(win_func_data.already_warned) < 20
@warn "Window function data is not avaialble for kepid $kepid, using default."
end
end
wf_id = win_func_data.default_wf_id
end
# TODO SCI DETAIL IMPORTANT? This does not include TPS timeouts or MESthresholds (see DR25 Completeness Products)
return wf_id
end
function calc_period_idx(P::Float64)::Int64
@assert(P>zero(P))
idx = searchsortedlast(win_func_data.wf_periods_in_days,P)
if idx == 0
return 1
elseif idx<length(win_func_data.wf_periods_in_days)
if P-win_func_data.wf_periods_in_days[idx]>win_func_data.wf_periods_in_days[idx+1]-P
idx += 1
end
end
return idx
end
function calc_duration_idx(D::Float64)::Int64
# NOTE: Currently assumes we left wf data in hours, so deal with that conversion here
@assert(D>=zero(D)) ##### Make sure this function is still doing the right thing if D = 0!
hours_in_day = 24
idx = searchsortedlast(win_func_data.wf_durations_in_hrs,D*hours_in_day)
if idx == 0
return 1
elseif idx<length(win_func_data.wf_durations_in_hrs)
if D*hours_in_day-win_func_data.wf_durations_in_hrs[idx]>win_func_data.wf_durations_in_hrs[idx+1]-D*hours_in_day
idx += 1
end
end
return idx
end
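#= Example of the nearest-bin lookup above: with wf_periods_in_days = [10.0, 20.0, 40.0],
   calc_period_idx(14.0) == 1 (closer to 10) and calc_period_idx(16.0) == 2 (closer to 20);
   periods below or above the grid clamp to the first or last bin, respectively. =#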
function eval_window_function(wf_idx::Int64=-1; Duration::Float64=0., Period::Float64=0.)::Float64
D_idx = calc_duration_idx(Duration)
P_idx = calc_period_idx(Period)
wf = eval_window_function(wf_idx,D_idx,P_idx)
# TODO SCI DETAIL: Improve way deal with missing wf values for some durations. Interpolate?
while wf<=zero(wf) && D_idx<length(win_func_data.wf_durations_in_hrs)
D_idx += 1
wf = eval_window_function(wf_idx,D_idx,P_idx)
end
return wf
end
function eval_window_function(wf_idx::Int64, D_idx::Int64, P_idx::Int64)::Float64
global win_func_data
#@assert(1<=wf_idx<maximum(win_func_data.window_function_id_arr))
#@assert(1<=P_idx<=length(win_func_data.wf_periods_in_days))
#@assert(1<=D_idx<=length(win_func_data.wf_durations_in_hrs))
return win_func_data.window_func_array[wf_idx,D_idx,P_idx]
end
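#= Illustrative usage (not executed), after setup_window_function(sim_param) has been called:
     wf_id = get_window_function_id(kepid)   # kepid::Int64 from the stellar catalog
     wf = eval_window_function(wf_id, Duration=0.125, Period=30.0)   # duration & period in days
   returns the tabulated window function value for the duration/period bin nearest the inputs. =#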
#Object for storing data necessary for OSD_interpolator
struct OSD_data{T1<:Real, T2<:Real}
allosds::Array{T1,3}
kepids::Array{Int64,1}
#periods_length::Int64
#durations_length::Int64
grid::Array{Array{T2,1},1}
function OSD_data(data::AbstractArray{T1,3}, kepids::AbstractArray{Int64,1}, durations::AbstractArray{T2,1}, periods::AbstractArray{T2,1} ) where {T1<:Real, T2<:Real}
@assert(size(data,1)==length(kepids))
@assert(size(data,2)==length(durations))
@assert(size(data,3)==length(periods))
@assert issorted(kepids)
@assert issorted(durations)
@assert issorted(periods)
new{T1,T2}(data, kepids, [durations,periods])
end
end
# Only point of this version is to provide a drop in replacement for Keir's code
function OSD_data(allosds::AbstractArray{T1,3}, kepids::AbstractArray{T3,1}, periods_length::Int64, durations_length::Int64, grid::Array{Array{T2,1},1}) where {T1<:Real, T2<:Real, T3<:Real}
@assert length(grid) == 2
@assert durations_length == length(grid[1])
@assert periods_length == length(grid[2])
if eltype(kepids) != Int64
kepids = round.(Int64,kepids)
end
@assert grid[1][1] == 1.5 # Checking that durations were passed as hours, since that's what Keir's assumed
return OSD_data(allosds, kepids, grid[1] ./ 24.0 , grid[2]) # Convert durations to days, since that's units in rest of SysSim
end
num_stars(osd::OSD_data) = size(osd.allosds,1)
num_durations(osd::OSD_data) = size(osd.allosds,2)
num_periods(osd::OSD_data) = size(osd.allosds,3)
function setup_OSD(sim_param::SimParam; force_reread::Bool = false) #reads in 3D table of OSD values and sets up global variables to be used in interpolation
global OSD_setup
if haskey(sim_param,"read_OSD_function") && !force_reread
return OSD_setup
end
#OSD_file = load(joinpath(Pkg.dir(), "ExoplanetsSysSim", "data", convert(String,get(sim_param,"osd_file","allosds.jld"))))
#OSD_file = load(joinpath(dirname(pathof(ExoplanetsSysSim)),"data",convert(String,get(sim_param,"osd_file","allosds.jld"))))
#OSD_file = load(joinpath(dirname(pathof(ExoplanetsSysSim)),"..","data",convert(String,get(sim_param,"osd_file","dr25fgk_relaxcut_osds.jld"))))
OSD_file = load(joinpath(dirname(pathof(ExoplanetsSysSim)),"..","data",convert(String,get(sim_param,"osd_file","dr25fgk_small_osds.jld2"))))
allosds = OSD_file["allosds"] #table of OSDs with dimensions: kepids,durations,periods
periods = OSD_file["periods"][1,:] #1000 period values corresponding to OSD values in the third dimension of the allosds table
kepids = OSD_file["kepids"] #kepids corresponding to OSD values in the first dimension of the allosds table
OSD_file = 0 # unload OSD file to save memory
#durations = [1.5,2.,2.5,3.,3.5,4.5,5.,6.,7.5,9.,10.5,12.,12.5,15.] #14 durations corresponding to OSD values in the second dimension of the allosds table
periods_length = length(allosds[1,1,:])
durations_length = length(allosds[1,:,1])
@assert length(cdpp_durations) == durations_length
grid = Array{Float64,1}[] #grid used in OSD_interpolator
push!(grid, cdpp_durations)
push!(grid, periods)
#global compareNoise = Float64[] #testing variable used to make sure OSD_interpolator is producing reasonable snrs
OSD_setup = OSD_data(allosds, kepids, periods_length, durations_length, grid)
allosds = 0 # unload OSD table to save memory
add_param_fixed(sim_param,"read_OSD_function",true)
return OSD_setup
end
setup_OSD_interp(sim_param::SimParam; force_reread::Bool = false) = setup_OSD(sim_param, force_reread=force_reread)
function find_index_lower_bounding_point(grid::AbstractArray{T1,1}, x::T2; verbose::Bool = false) where {T1<:Real, T2<:Real}
if verbose
@assert issorted(grid)
@assert length(grid)>=2
println("# ", grid[1], " <= ", x, " <= ", grid[end])
@assert grid[1] <= x <= grid[end]
end
idx = searchsortedlast(grid,x)
if idx == 0
idx = 1
#elseif idx >= length(grid) # should never happen
# idx = length(grid)-1
end
return idx
end
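#= Example: find_index_lower_bounding_point([1.5, 2.0, 2.5], 1.7) == 1; values below the grid
   (e.g. 1.0) also return 1, so the interpolation below clamps rather than extrapolating. =#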
function interp_OSD_from_table(kepid::Int64, period::T2, duration::T3; verbose::Bool = false) where {T2<:Real, T3<:Real}
@assert eltype(OSD_setup.kepids) == Int64 # otherwise would need something like kepid = convert(eltype(OSD_setup.kepids),kepid)
kepid_idx = searchsortedfirst(OSD_setup.kepids, kepid)
if (kepid_idx > num_stars(OSD_setup)) || (OSD_setup.kepids[kepid_idx] != kepid) # if we don't find the kepid in allosds.jld, then we make a random one
kepid_idx = rand(1:size(OSD_setup.allosds,1))
if verbose
println("# picked random kepid = ", OSD_setup.kepids[kepid_idx])
end
end
idx_duration = find_index_lower_bounding_point(OSD_setup.grid[1], duration)
idx_period = find_index_lower_bounding_point(OSD_setup.grid[2], period)
#z = view(OSD_setup.allosds,kepid_idx,idx_duration:(idx_duration+1),idx_period:(idx_period+1)) # use correct kepid index to extract 2D table from 3D OSD table
#= value = z[1,1] * w_dur * w_per +
z[2,1] * (1-w_dur) * w_per +
z[1,2] * w_dur * (1-w_per) +
z[2,2] * (1-w_dur) * (1-w_per) =#
if idx_duration < length(OSD_setup.grid[1]) && idx_period < length(OSD_setup.grid[2])
# Standard bilinear interpolation: the (1-w) factors weight the lower grid point and the
# w factors the upper grid point, consistent with the 1-D fallback branches below.
w_dur = (duration-OSD_setup.grid[1][idx_duration]) / (OSD_setup.grid[1][idx_duration+1]-OSD_setup.grid[1][idx_duration])
w_per = (period -OSD_setup.grid[2][idx_period]) / (OSD_setup.grid[2][idx_period+1] -OSD_setup.grid[2][idx_period])
value = OSD_setup.allosds[kepid_idx,idx_duration, idx_period ] * (1-w_dur) * (1-w_per) +
OSD_setup.allosds[kepid_idx,idx_duration+1,idx_period ] * w_dur * (1-w_per) +
OSD_setup.allosds[kepid_idx,idx_duration, idx_period+1] * (1-w_dur) * w_per +
OSD_setup.allosds[kepid_idx,idx_duration+1,idx_period+1] * w_dur * w_per
elseif idx_period < length(OSD_setup.grid[2])
w_per = (period -OSD_setup.grid[2][idx_period]) / (OSD_setup.grid[2][idx_period+1] -OSD_setup.grid[2][idx_period])
value = (OSD_setup.allosds[kepid_idx,idx_duration, idx_period+1] - OSD_setup.allosds[kepid_idx,idx_duration, idx_period ]) * w_per +
OSD_setup.allosds[kepid_idx,idx_duration, idx_period ]
elseif idx_duration < length(OSD_setup.grid[1])
w_dur = (duration-OSD_setup.grid[1][idx_duration]) / (OSD_setup.grid[1][idx_duration+1]-OSD_setup.grid[1][idx_duration])
value = (OSD_setup.allosds[kepid_idx,idx_duration+1,idx_period ] - OSD_setup.allosds[kepid_idx,idx_duration, idx_period ]) * w_dur +
OSD_setup.allosds[kepid_idx,idx_duration,idx_period ]
else
value = OSD_setup.allosds[kepid_idx,idx_duration, idx_period ]
end
end
# function interp_OSD_from_table(kepid::Int64, period::Real, duration::Real)
# kepid = convert(Float64,kepid)
# meskep = OSD_setup.kepids #we need to find the index that this planet's kepid corresponds to in allosds.jld
# kepid_index = findfirst(meskep, kepid)
# if kepid_index == 0
# kepid_index = rand(1:88807) #if we don't find the kepid in allosds.jld, then we make a random one
# end
# olOSD = OSD_setup.allosds[kepid_index,:,:] #use correct kepid index to extract 2D table from 3D OSD table
# # olOSD = convert(Array{Float64,2},olOSD)
# @time lint = Lininterp(olOSD, OSD_setup.grid) #sets up linear interpolation object
# osd = ApproXD.eval2D(lint, [duration*24,period])[1] #interpolates osd
# return osd
# end
# function cdpp_vs_osd(ratio::Float64, cuantos::Int64)
# #testing function that takes ratios of cdpp_snr/osd_snr and plots a histogram to make sure the results are reasonable.
# global compareNoise
# push!(compareNoise,ratio)
# if length(compareNoise) == cuantos
# PyPlot.plt[:hist](compareNoise,100)
# println("MES median: ",median(compareNoise)," MES mean: ",mean(compareNoise), " Standard deviation: ",std(compareNoise))
# cuantos = 100000000
# end
# return cuantos
# end
end # module WindowFunction
using Test
# write your own tests here
@test 1 == 1
using DataFrames
using ExoplanetsSysSim
function run_constructor_tests()
ExoplanetsSysSim.SimulationParameters.test_sim_param_constructors()
sim_param = ExoplanetsSysSim.setup_sim_param_demo()
ExoplanetsSysSim.test_orbit_constructors()
ExoplanetsSysSim.test_planet_constructors(sim_param)
ExoplanetsSysSim.test_star_constructors(sim_param)
ExoplanetsSysSim.test_planetary_system_constructors(sim_param)
ExoplanetsSysSim.test_target(sim_param)
ExoplanetsSysSim.test_transit_observations(sim_param)
(cat_phys, cat_obs) = ExoplanetsSysSim.test_catalog_constructors(sim_param)
ExoplanetsSysSim.test_summary_statistics(cat_obs, cat_phys, sim_param)
ExoplanetsSysSim.test_abc_distance(cat_obs, cat_phys, sim_param)
return 0
end
@test run_constructor_tests() == 0 # Just tests that the basic elements compile and run # TODO: Write tests that will be useful in diagnosing any bugs
#Test CORBITS: moved from ExoplanetsSysSim.test_corbits() to the CORBITS package's own test suite.
using CORBITS
include(joinpath(dirname(pathof(CORBITS)),"..","test","runtests.jl"))
# ExoplanetsSysSim
[](https://zenodo.org/badge/latestdoi/179777476)
Welcome to the ExoplanetsSysSim package for generating planetary systems and simulating observations of those systems with a transit survey. Currently, SysSim focuses on NASA's Kepler mission, but we've aimed to develop a generic framework that can be applied to other surveys (e.g., K2, TESS, PLATO, LSST, etc.).
## How to install ExoplanetsSysSim:
* Make sure you have julia (v1.6 or greater) installed. You can download Julia [here](https://julialang.org/downloads/).
* Make sure you have a recent git and [git-lfs](https://git-lfs.github.com/) installed.
If you're using ICS-ACI, then do this by running the following for each shell (or putting it in your .bashrc)
```sh
export PATH=/gpfs/group/dml129/default/sw/git-lfs:$PATH
module load git
```
* If you want to use ssh keys instead of https authentication (to minimize typing your github password), then:
* Setup a local ssh key using ssh-keygen
* Tell Github about your ssh key: Person Icon (upper right), Settings, SSH & GPG keys, New SSH Key. Enter a name in the title box and paste the contents of `cat ~/.ssh/id_rsa.pub` into the "Key" box. Add SSH Key.
* Create a clone of the [SysSimData repository](https://github.com/ExoJulia/SysSimData).
- If you might want to add/edit files in the SysSimData repository, then please fork your own repository on github and clone that instead of the repo in ExoJulia. Then you can create pull requests when you're ready to add/update files in the main repository.
- If you plan to use existing SysSimData files as-is, then you can just create a new copy with `git clone`. I suggest placing it somewhere outside of your home directory, .julia, or JULIA_DEPOT_PATH.
Once you've got a clone of a SysSimData repository, initialize and update the submodules. Git "should" automatically download large files via git-lfs. If not, then you can cd into the directory and run `git lfs fetch` to force it to update. For example,
```sh
git clone [email protected]:ExoJulia/SysSimData.git
cd SysSimData
git submodule init
git submodule update
git lfs fetch # if the binary data files didn't download automatically
```
- If you're using ICS-ACI, then you could simply use the repo in /storage/home/ebf11/group/ebf11/kepler/SysSimData that should already be set up
* Make sure that your JULIA_DEPOT_PATH (~/.julia by default) does not include an old version of CORBITS or ExoplanetsSysSim. If this is your first time using julia v1.0, then you probably don't need to do anything. Otherwise, I see two ways to do this:
   - One way to avoid conflicts is to move or delete the JULIA_DEPOT_PATH. But if there's _any chance_ that you might have things in your current CORBITS or ExoplanetsSysSim repos that you want to keep, then move rather than delete (or make a backup copy of those repos before deleting them). Similarly, if there are any other packages you've been developing, make sure you have a backup copy before deleting your JULIA_DEPOT_PATH. Once you've fully cleared out the old repos, run `Pkg.rm("CORBITS"); Pkg.rm("ExoplanetsSysSim"); Pkg.gc()` and then `rm -rf CORBITS ExoplanetsSysSim` from the dev subdirectory of your JULIA_DEPOT_PATH (~/.julia by default). Warning: Sometimes Julia manages to keep these around despite my efforts to delete them, so I've found it's easier to rename my .julia directory and then copy any other repos in development mode back to my new .julia directory.
   - Another way to avoid conflicts with old versions is to specify a new JULIA_DEPOT_PATH. However, if you go this route, then you'll need to make sure that this environment variable is set to the desired depot in each of your future shell sessions.
```sh
export JULIA_DEPOT_PATH=~/.julia_clean
```
On ICS-ACI/Roar, it's useful to set your JULIA_DEPOT_PATH to be in your work directory, as that is higher performance and has more space than your home directory. I've put this in my .bashrc, so I don't forget and get confused about what's being modified. E.g.,
```sh
export JULIA_DEPOT_PATH=~/work/.julia
```
* Run julia and install the ExoplanetsSysSim repo as a Julia package.
- If you will only be using it as is, then you can simply add the registered repo under the ExoJulia organization.
```julia
using Pkg
Pkg.add("ExoplanetsSysSim")
```
- However, if you may be modifying source code in the ExoplanetsSysSim directory itself, then please fork your own version on github and develop that version instead. For example,
```julia
Pkg.develop(PackageSpec(url="[email protected]:ExoJulia/ExoplanetsSysSim.jl.git"))
```
(but replacing ExoJulia with the github username associated with your fork). If you've set ExoplanetsSysSim to be under development, Julia will not automatically update it. You'll have to do a `git pull` from dev/ExoplanetsSysSim to merge in new updates.
- Some MacOS users find that CORBITS does not build successfully. This does not prevent MacOS users from using SysSim in "single-observer mode" (which is the mode used for existing publications).
* Create a symlink so 'data' in the ExoplanetsSysSim directory points to the SysSimData repo.
- Change into the directory where you've added or are developing ExoplanetsSysSim (likely ${JULIA_DEPOT_PATH}/dev/ExoplanetsSysSim).
- Create a symlink named data
```sh
cd .julia/dev/ExoplanetsSysSim
#cd ${JULIA_DEPOT_PATH}/dev/ExoplanetsSysSim # alternative if you set JULIA_DEPOT_PATH
ln -s PATH_TO_SYSSIMDATA data
```
- Alternatively, you can override the default file paths to point to wherever you placed the binary input files, although this probably requires more work.
* Optionally, run some tests, e.g.
```julia
using ExoplanetsSysSim
include(joinpath(dirname(pathof(ExoplanetsSysSim)),"..","test","runtests.jl"))
```
## How to use SysSim for your own Projects
- Install ExoplanetsSysSim (see above)
- Create your own repository containing code that will call ExoplanetsSysSim
- Make it a Julia project by adding dependencies, including ExoplanetsSysSim.
- Make your project depend on either the registered version of ExoplanetsSysSim or the version in your development directory. Since you've already installed ExoplanetsSysSim, Julia should find and reuse the code in the dev directory rather than reinstalling it.
```julia
using Pkg
Pkg.activate(".")
Pkg.instantiate()
Pkg.add("ExoplanetsSysSim") # For the registered version of ExoplanetsSysSim
# Pkg.develop("ExoplanetsSysSim") # To use your development branch of ExoplanetsSysSim.
```
- Have your project code load ExoplanetsSysSim and use it
```julia
using ExoplanetsSysSim
...
```
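- A minimal end-to-end sketch (based on the demo parameters and the functions exercised in `test/runtests.jl`; the exact calls here are illustrative, so substitute your own simulation parameters and catalog-generation functions):
```julia
using ExoplanetsSysSim
sim_param = ExoplanetsSysSim.setup_sim_param_demo()         # demo simulation parameters
generate_kepler_target = ExoplanetsSysSim.get_function(sim_param, "generate_kepler_target")
target = generate_kepler_target(sim_param)                  # draw one synthetic Kepler target
obs = ExoplanetsSysSim.calc_target_obs_single_obs(target, sim_param)  # simulate its observations
```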
- At the moment, you can test using 'examples/generate_catalogs.jl' from Matthias's project at https://github.com/ExoJulia/SysSimExClusters
- By default, the master branch includes recent updates. There is a chance that we occasionally break something that's not part of our test set. Therefore, we've created a [stable branch](https://github.com/ExoJulia/ExoplanetsSysSim.jl/tree/stable) which users may wish to use for science results to be published. If you find something broken in the stable branch, then please check the [open issues](https://github.com/ExoJulia/ExoplanetsSysSim.jl/issues). If we're not yet aware of your problem, then notify the SysSim team via a new GitHub issue.
* Write your papers and share your code as a GitHub repo
- If you want to share your Manifest.toml file, then make a copy of the Manifest.toml when you're not in develop mode. Otherwise, users on other systems will get errors, since they can't access the same path with your development version.
- If you'd like your code to appear as part of the [ExoJulia organization](https://github.com/ExoJulia/), then please let [Eric](https://github.com/eford) know.
* Cite relevant code and associated publications
- [](https://zenodo.org/badge/latestdoi/179777476) Citation for core SysSim code base.
- [Hsu et al. (2018) *AJ* 155, 205.](https://arxiv.org/ct?url=https%3A%2F%2Fdx.doi.org%2F10.3847%2F1538-3881%2Faab9a8&v=19ae32f8): First published paper, describes basic SysSim functionality pre-1.0 version.
- [Hsu et al. (2019) *AJ* 158, 3.](https://doi.org/10.3847/1538-3881/ab31ab) "Occurrence Rates of Planets Orbiting FGK Stars: Combining Kepler DR25, Gaia DR2, and Bayesian Inference": Describes improvements to model for Kepler pipeline in SysSim v1.0, please cite if using SysSim v1.
- [Hsu, Ford & Terrien (2020) *MNRAS* 498, 2249-2262.](https://ui.adsabs.harvard.edu/abs/2020MNRAS.498.2249H/abstract) "Occurrence Rates of Planets Orbiting M Stars: Applying ABC to Kepler DR25, Gaia DR2, and 2MASS Data": Describes tweaks for M stars. Please cite for occurence rates for M (and late K) stars.
- [He, Ford, & Ragozzine (2019) *MNRAS* 490, 4575-4605.](https://doi.org/10.1093/mnras/stz2869) "Architectures of Exoplanetary Systems. I: A Clustered Forward Model for Exoplanetary Systems Around Kepler’s FGK Stars": Describes model for generating planetary systems, uses SysSim v1.0. Please cite if using clustered model with two populations with different inclination distributions.
- [He, Ford, & Ragozzine (2020a) *AJ* 161, 16 (24pp).](https://iopscience.iop.org/article/10.3847/1538-3881/abc68b) "Architectures of Exoplanetary Systems. II: An Increase in Inner Planetary System Occurrence Toward Later Spectral Types for Kepler's FGK Dwarfs":
Describes model for the fraction of stars with planets as a function of spectral type, uses SysSim v1.0, please cite if using results for how occurrence rates or architectures depend on stellar properties.
- [He et al. (2020b) *AJ* 160, 276 (38pp).](https://iopscience.iop.org/article/10.3847/1538-3881/abba18) "Architectures of Exoplanetary Systems. III: Eccentricity and Mutual Inclination Distributions of AMD-stable Planetary Systems": Describes model for generating planetary systems, uses SysSim v1.1, please cite if using clustered model with one population of broader eccentricity and mutual inclination distributions based on AMD stability.
- [Brakensiek & Ragozzine (2016) *ApJ* 821, 47.](https://doi.org/10.3847/0004-637X/821/1/47): Citation for CORBITS. Please cite if you make use of averaging over viewing geometries.
* Please let the SysSim team know about your publication (or other use of SysSim, e.g., proposals) via a pull request.
## The SysSim Team:
### Key Developers:
* Eric Ford: Conceptual framework, Development of core codebase
* Matthias He: Development and application of clustered multi-planet model
* Danley Hsu: Validation of Kepler model, distance functions and application to planet occurrence rates
* Darin Ragozzine: Conceptual framework, Incorporating DR25 data products
### Other Contributors/Consultants:
* Robert Morehead: Preliminary model development, exploratory applications of ABC and comparing distance functions.
* Keir Ashby: Testing incorporation of DR25 data products
* Jessi Cisewski: Advice on statistical methodology
* Chad Schafer: Advice on statistical methodology
* Tom Loredo: Advice on statistical methodology
* Robert Wolpert: Advice on statistical methodology
### Acknowledgements:
* NASA
* [Kepler Mission](https://www.nasa.gov/mission_pages/kepler/main/index.html)
* [Kepler Science Team](https://www.nasa.gov/mission_pages/kepler/team/teamroster)
* Kepler Multi-body & Transit Timing Variations Working Groups
* Origins of Solar Systems program, award NNX14AI76G
* Exoplanets Research Program, award NNX15AE21G
* [The Pennsylvania State University](https://www.psu.edu/)
* [Dept. of Astronomy & Astrophysics](http://science.psu.edu/astro/)
* [Center for Exoplanets & Habitable Worlds](https://exoplanets.psu.edu/)
* [Eberly College of Science](http://science.psu.edu/)
* [Institute for Computational & Data Sciences](https://icds.psu.edu/)
* [Center for Astrostatistics](https://astrostatistics.psu.edu/)
* [Penn State Astrobiology Research Center](https://astrobiology.psu.edu/)
* [Brigham Young University](https://www.physics.byu.edu/)
* [University of Florida](https://www.ufl.edu/)
* [Florida Institute of Technology](https://www.fit.edu/)
* Statistical and Applied Mathematical Sciences Institute
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | docs | 2591 | # Examples of ExoplanetsSysSim in action
ExoplanetsSysSim users are encouraged to add links to their repositories with example use cases, including simple examples, advanced tutorials, and repositories in support of scientific publications.
## Publications using ExoplanetsSysSim
- [Hsu et al. (2018)](https://doi.org/10.3847/1538-3881/aab9a8) *AJ* 155, 205. "Improving the Accuracy of Planet Occurrence Rates from Kepler Using Approximate Bayesian Computation":
See [old repository](https://github.com/dch216/ExoplanetsSysSim.jl/tree/hsu-et-al-2018/examples/hsu_etal_2018) for code that works with Julia v0.6 (before the examples were separated from main code base).
- [Hsu et al. (2019)](https://doi.org/10.3847/1538-3881/ab31ab) *AJ* 158, 3. "Occurrence Rates of Planets Orbiting FGK Stars: Combining Kepler DR25, Gaia DR2, and Bayesian Inference":
See [dr25_gaia_fgk folder](https://github.com/ExoJulia/ExoplanetsSysSim.jl/tree/master/examples/dr25_gaia_fgk) of this repository.
- [He et al. (2019)](https://doi.org/10.1093/mnras/stz2869) *MNRAS* 490, 4575 "Architectures of exoplanetary systems – I. A clustered forward model for exoplanetary systems around Kepler’s FGK stars":
See [He_Ford_Ragozzine_2019 branch](https://github.com/ExoJulia/SysSimExClusters/tree/He_Ford_Ragozzine_2019) of [SysSimExClusters repository](https://github.com/ExoJulia/SysSimExClusters).
- [Hsu et al. (2020)](https://ui.adsabs.harvard.edu/abs/2020arXiv200202573H/abstract) submitted to *MNRAS* (arXiv:2002.02573). "Occurrence Rates of Planets Orbiting M Stars: Applying ABC to Kepler DR25, Gaia DR2, and 2MASS Data":
See [dr25_gaia_m folder](https://github.com/ExoJulia/ExoplanetsSysSim.jl/tree/master/examples/dr25_gaia_m) of this repository.
- [He et al. (2020)](https://ui.adsabs.harvard.edu/abs/2020arXiv200304348H/abstract) submitted to *MNRAS* (arXiv:2003.04348). "Architectures of Exoplanetary Systems. II: An Intrinsic Relation between Planetary System Occurrence and Spectral Type for Kepler's FGK Dwarfs":
See [He_Ford_Ragozzine_2020 branch](https://github.com/ExoJulia/SysSimExClusters/tree/He_Ford_Ragozzine_2020) of [SysSimExClusters repository](https://github.com/ExoJulia/SysSimExClusters).
- [Gilbert & Fabrycky (2020)](https://ui.adsabs.harvard.edu/abs/2020arXiv200311098G/abstract) submitted to *AJ* (arXiv:2003.11098) "An information theoretic framework for classifying exoplanetary system architectures".
Used posterior samples from [He et al. (2019)](https://doi.org/10.1093/mnras/stz2869).
## Simple examples & tutorials
- Users are encouraged to add links here.
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | docs | 1182 | # Required Files:
To reproduce a calculation from Hsu et al. (2019), you will need the following data files (under the 'data' sub-directory of the repository):
* "DR25topwinfuncs.jld": Summary of window function data taken from the DR25 Completeness Products
* "dr25fgk_osds.jld": One-sigma depth functions for the filtered catalog of Kepler planet search targets (Not part of the repository, download from: https://scholarsphere.psu.edu/resources/460cb19b-86e9-4c04-ac17-69950444437f).
* "KeplerMAST_TargetProperties.csv": Summary of key properties for Kepler Targets that are not included in the other catalogs.
* "q1q17_dr25_gaia_fgk.jld": Filtered DR25 stellar catalog of FGK targets (see Hsu et al. 2019 for explanation of cuts).
* "q1_q17_dr25_koi.csv": DR25 KOI catalog with transit depths replaced with median depths from the MCMC posterior chains.
# Running Calculation:
To perform the actual calculations, you run:
```
> julia abc_run.jl
```
This defaults to computing a planet candidate occurrence rate and rate density for 5 period-radius bins spanning 0.5-1.75 R_Earth and 237-500 days.
For other ranges, you'd edit the values of p_bin_lim and r_bin_lim in param.in.
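As a rough sketch (hypothetical syntax — check the `param.in` shipped with this example, as the exact format may differ between releases), restricting the calculation to 16-32 day periods and 1-2 R_Earth radii would look something like:
```julia
# Hypothetical excerpt of param.in: bin edges for period (days) and radius (R_Earth)
p_bin_lim = [16.0, 32.0]
r_bin_lim = [1.0, 2.0]
```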
| ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 1.2.1 | 7f3df18f94c685d258b46a2adadcc0c74f4b4423 | docs | 1048 | # Required Files:
To reproduce a calculation from Hsu et al. (2020), you will need the following data files (under the 'data' sub-directory of the repository):
* "DR25topwinfuncs.jld": Summary of window function data taken from the DR25 Completeness Products
* "dr25m_osds.jld": One-sigma depth functions for the filtered catalog of Kepler planet search targets.
* "KeplerMAST_TargetProperties.csv": Summary of key properties for Kepler Targets that are not included in the other catalogs.
* "q1q17_dr25_gaia_m.jld": Filtered DR25 stellar catalog of M targets (see Hsu et al. 2020 for explanation of cuts).
* "q1_q17_dr25_koi.csv": DR25 KOI catalog with transit depths replaced with median depths from the MCMC posterior chains.
# Running Calculation:
To perform the actual calculations, you run:
```
> julia abc_run.jl
```
This defaults to computing a planet candidate occurrence rate and rate density for 7 period-radius bins spanning 0.25-4 R_Earth and 8-16 days.
For other ranges, you'd edit the values of p_bin_lim and r_bin_lim in param.in. | ExoplanetsSysSim | https://github.com/ExoJulia/ExoplanetsSysSim.jl.git |
|
[
"MIT"
] | 0.1.0 | be34ebffeca107c3e948740ec63239c684a20a34 | code | 94 | module Gershgorin
export gershgorin, gershgorin!, overlap, get_discs
include("discs.jl")
end | Gershgorin | https://github.com/v715/Gershgorin.jl.git |
|
[
"MIT"
] | 0.1.0 | be34ebffeca107c3e948740ec63239c684a20a34 | code | 1638 | using LinearAlgebra
using LazySets
using Plots, LaTeXStrings
# Convert a complex number to a 2-element [real, imaginary] coordinate vector.
complex2array(x::T) where {T<:Complex} = [x.re, x.im]

# Gershgorin discs of a square matrix A: the i-th disc is centered at A[i,i] with
# radius equal to the sum of the absolute values of the off-diagonal entries of row i.
function get_discs(A::AbstractMatrix)
    centers = diag(A) |> complex |> x -> complex2array.(x)
    radii = A - Diagonal(A) |> x -> abs.(x) |> M -> sum(M, dims=2) |> real
    discs = [Ball2(c, r) for (c, r) in zip(centers, radii)]
    return discs
end
# Expand a single label into a 1×dim row vector so only the first disc carries a legend entry.
function parse_label(label::Union{String,LaTeXString}, dim::Int)
    label == "" && return label
    return reshape([i == 1 ? label : nothing for i in 1:dim][:, :], (1, dim))
end
# Plot the Gershgorin discs of A on a new figure.
function gershgorin(A::AbstractMatrix; c=:blue, label="", alpha=0.2)
discs = get_discs(A)
label = parse_label(label, length(discs))
legend = label == "" ? false : true
plot(discs, c=c, fillalpha=alpha, lw=0, aspect_ratio=1,
legend=legend, label=label,
xlabel="Real Axis", ylabel="Imaginary Axis")
end
# Plot the Gershgorin discs of A on the current figure.
function gershgorin!(A::AbstractMatrix; c=:blue, label="", alpha=0.2)
discs = get_discs(A)
label = parse_label(label, length(discs))
legend = label == "" ? false : true
plot!(discs, c=c, fillalpha=alpha, lw=0, aspect_ratio=1,
legend=legend, label=label,
xlabel="Real Axis", ylabel="Imaginary Axis")
end
# Shade the pairwise intersections of the Gershgorin discs of A and B.
function overlap(A::AbstractMatrix, B::AbstractMatrix; c=:white, alpha=0.8)
discs_A = get_discs(A)
discs_B = get_discs(B)
intersections = [discs_A[i] ∩ discs_B[j] for i in 1:length(discs_A) for j in 1:length(discs_B)]
overlap = UnionSetArray(intersections)
plot!(overlap, aspect_ratio=1, lw=0, c=c, fillalpha=alpha, label="Overlap", legend=true)
end
| Gershgorin | https://github.com/v715/Gershgorin.jl.git |
|
[
"MIT"
] | 0.1.0 | be34ebffeca107c3e948740ec63239c684a20a34 | code | 93 | using Gershgorin
using Test
@testset "Gershgorin.jl" begin
# Write your tests here.
end
| Gershgorin | https://github.com/v715/Gershgorin.jl.git |
|
[
"MIT"
] | 0.1.0 | be34ebffeca107c3e948740ec63239c684a20a34 | docs | 1713 | # Gershgorin
Visualize the Gershgorin discs that bound the spectrum of a square matrix (see the [Gershgorin disc theorem](https://en.wikipedia.org/wiki/Gershgorin_circle_theorem)).
## Installation
```zsh
] add https://github.com/v715/Gershgorin.jl.git
```
## Quickstart Guide
We can visualize the Gershgorin discs for a random real matrix and its transpose.
Note that a matrix and its transpose have the same eigenvalues, so those eigenvalues must lie in the intersection of the Gershgorin regions of these two matrices.
```Julia
using LinearAlgebra
using Plots, LaTeXStrings
using Gershgorin
# Make a random (5,5) real matrix
M = randn(5, 5)
# Plot Gershgorin's discs
gershgorin(M; c=:blue, label=L"$M$")
λ = eigvals(M)
p1 = plot!(λ, seriestype=[:scatter], c=:black, label=L"$\lambda(M)$")
# Now do the same for the transpose
gershgorin(transpose(M); c=:red, label=L"$M^T$")
λ = eigvals(transpose(M))
p2 = plot!(λ, seriestype=[:scatter], c=:black, label=L"$\lambda(M^T)$")
# Plot the intersection between the two sets of regions
gershgorin(M; c=:blue)
gershgorin!(transpose(M); c=:red)
overlap(M, transpose(M), c=:black, alpha=1)
p3 = plot!(λ, seriestype=[:scatter], c=:black, label=L"$\lambda(M)$")
plot(p1, p2, p3, link=:all, dpi=300, layout=(1,3), size=(750,350))
```

Additionally, if you just want to get the Gershgorin discs for a matrix, you can use the `get_discs` function.
```Julia
discs = get_discs(M)
plot(discs, c=:blue, alpha=0.2, lw=0)
plot!(eigvals(M), seriestype=[:scatter], c=:black, label=L"$\lambda(M)$", aspect_ratio=1) |> display
```

| Gershgorin | https://github.com/v715/Gershgorin.jl.git |
|
[
"MIT"
] | 1.1.4 | b494a8c911f2492a2e9085e1b8653154503c2c62 | code | 6038 | __precompile__()
"""
Module for dependent bootstrap procedures, by Colin T Bowers
Implemented bootstrap methods: \n
- IID
- Stationary
- Moving Block
- Circular Block
- NoOverlapBlock
Implemented block length selection procedures: \n
- Patton, Politis, and White (2009) Correction to Automatic Block Length Selection For The Dependent Bootstrap
Accepted input dataset types: \n
- Vector{<:Number}
- Matrix{<:Number} (where rows are observations and columns are variables)
- Vector{Vector{<:Number}} (where elements of inner vectors are observations and outer vectors are variables)
- DataFrame
- TimeSeries.TimeArray{T,N} (only for N = 1 and N = 2)
Additional input dataset types are easily added. Please open an issue at https://github.com/colintbowers/DependentBootstrap.jl
The module has only a single exported type: \n
- BootInput <-- Core input type accepted by all exported functions. Typically constructed via keyword method. See ?BootInput for more detail.
All exported functions exhibit the following keyword signatures: \n
- exported_func(data, bootinput::BootInput)
- exported_func(data ; kwargs...)
Most users will be content to use the keyword argument method. In practice, this method wraps a
keyword argument BootInput constructor, which is then input to the exported function BootInput
method. For more detail on accepted keywords, see ?BootInput. All exported functions then use
the input dataset and bootstrap methodology described in BootInput in order to return the
appropriate statistics. A list of exported functions follows: \n
- optblocklength <-- Estimate the optimal block length for the input dataset
- dbootinds <-- Get a vector of bootstrap resampling indices
- dbootdata <-- Get a vector of resampled datasets
- dbootlevel1 <-- Get a vector of level 1 resampled bootstrap statistics
- dboot <-- Get the level 2 bootstrap statistic
- dbootlevel2 <-- Identical to dboot. Included for completeness
- dbootvar <-- Wrapper on dboot that sets the level 2 statistic as the variance
- dbootconf <-- Wrapper on dboot that sets the level 2 statistic as a confidence interval
I use the phrases level 1 and level 2 statistics in this package in the same manner discussed in
Chapter 1 of Lahiri's textbook Resampling Methods for Dependent Data.
This package has an MIT license. Please see associated LICENSE.md file for more detail.
"""
module DependentBootstrap
using StatsBase, Distributions, DataFrames, TimeSeries
import Base: show
export BootInput,
optblocklength,
dbootinds,
dbootdata,
dbootlevel1,
dbootlevel2,
dboot,
dbootvar,
dbootconf
const NUM_RESAMPLE = 1000::Int #Default number of bootstrap resamples
#-----------------------------------------------------------------------------------------
# OLD CODE THAT USED Requires.jl TO LOAD DataFrames.jl and TimeSeries.jl
#-----------------------------------------------------------------------------------------
#In order to accommodate different types of datasets, we use the lazy loading
#features offered by the Requires package here, so that the modules in which these
#datasets are defined are not loaded unless they are actually needed. All functions
#that reference these modules are placed here inside __init__() (which is a
#requirement when using Requires package on Julia v0.7+). If you want to add a new
#dataset type to DependentBootstrap, it should be as simple as adding appropriate
#methods here. This module is structured so that nothing else should need to be done.
#The methods that need to be added are:
# num_obs(data::T)::Int <- Number of observations in dataset
# num_var(data::T)::Int <- Number of variables in dataset
# local_get_var(data::T, i::Int)::Vector <- Get the data associated with the ith variable and output as a Vector
# local_get_index(data::T, inds::Vector{Int})::T <- Resample data using resampling indices in inds
#For local_get_index, note that the output type should always match the input type of the dataset.
# function __init__()
# @require TimeSeries="9e3dc215-6440-5c97-bce1-76c03772f85e" begin
# (num_obs(data::TimeSeries.TimeArray{T,1})::Int) where {T} = size(data, 1)
# (num_obs(data::TimeSeries.TimeArray{T,2})::Int) where {T} = size(data, 1)
# (num_var(data::TimeSeries.TimeArray{T,1})::Int) where {T} = 1
# (num_var(data::TimeSeries.TimeArray{T,2})::Int) where {T} = size(data, 2)
# (local_get_var(data::TimeSeries.TimeArray{T,1}, i::Int)::Vector{T}) where {T} = i == 1 ? data.values[:] : error("Invalid index $(i) given data $(typeof(data)) with number of columns $(num_var(data))")
# (local_get_var(data::TimeSeries.TimeArray{T,2}, i::Int)::Vector{T}) where {T} = (1 <= i <= num_var(data)) ? data.values[:, i] : error("Invalid index $(i) given data $(typeof(data)) with number of columns $(num_var(data))")
# (local_get_index(data::TimeSeries.TimeArray{T,1}, inds::Vector{Int})::TimeSeries.TimeArray{T,1}) where {T} = TimeSeries.TimeArray(TimeSeries.timestamp(data), data.values[inds], TimeSeries.colnames(data) ; unchecked=true)
# (local_get_index(data::TimeSeries.TimeArray{T,2}, inds::Vector{Int})::TimeSeries.TimeArray{T,2}) where {T} = TimeSeries.TimeArray(TimeSeries.timestamp(data), data.values[inds, :], TimeSeries.colnames(data) ; unchecked=true)
# end
# @require DataFrames="a93c6f00-e57d-5684-b7b6-d8193f3e46c0" begin
# (num_obs(data::DataFrames.DataFrame)::Int) = size(data, 1)
# (num_var(data::DataFrames.DataFrame)::Int) = size(data, 2)
# (local_get_var(data::DataFrames.DataFrame, i::Int)) = (1 <= i <= num_var(data)) ? data[:, i] : error("Invalid index $(i) given data $(typeof(data)) with number of columns $(num_var(data))")
# (local_get_index(data::DataFrames.DataFrame, inds::Vector{Int})::DataFrames.DataFrame) = data[inds, :]
# end
# end
#-----------------------------------------------------------------------------------------
include("types.jl")
include("blocklength.jl")
include("bootinds.jl")
#include("tapered.jl")
include("core.jl")
end # module
| DependentBootstrap | https://github.com/colintbowers/DependentBootstrap.jl.git |
|
[
"MIT"
] | 1.1.4 | b494a8c911f2492a2e9085e1b8653154503c2c62 | code | 9814 |
"""
optblocklength(data, bi::BootInput)::Float64
optblocklength(data ; kwargs...)::Float64
Provides an estimate of the optimal block-length to use with a dependent bootstrap.
For multivariate datasets, optimal block length is estimated for each column of data, and then
bi.fblocklengthcombine, which is a function that maps Vector{Float64} to Float64, is called
to reduce the multiple estimates to a single estimate. The default value for fblocklengthcombine
is median.
Block length methods currently implemented include: \n
- Patton, Politis, White (2009) "Correction to Automatic Block Length Selection For the Dependent Bootstrap" \n
For all methods discussed above, bandwidth is estimated following Politis (2003) "Adaptive Bandwidth Choice", using the
flat-top kernel suggested in that paper.
"""
function optblocklength(x::AbstractVector{<:Number}, blm::BLPPW2009{P2003}, bootmethod::Tbm)::Float64 where {Tbm<:BootMethod}
length(x) < 3 && error("You must have at least 3 observations to estimate block length")
(M, xVar, covVec) = blocklength_ma_and_cor(x) #Bandwidth method currently forced to politis (2003)
kernelCovVec = blocklength_kernel_cov(covVec, M)
gHat = 0.0
for k = 1:M
        gHat += 2 * k * kernelCovVec[k] #note, "2*" is because the sum runs from -M to M but is symmetric about 0. Note, the k=0 term in the sum vanishes since k=0 -> |k|=0
end
dHat = optblocklength_blppw2009_dhat(bootmethod, xVar, kernelCovVec)
#Equation 9 and 14 from Politis, White (2004)
blocklength = (2 * gHat^2 * length(x) / dHat)^(1/3)
blocklength = min(blocklength, ceil(min(3*sqrt(length(x)), length(x) / 3))) #Enforce upper bound on block length suggested by Patton
blocklength = max(blocklength, 1.0)
return blocklength
end
(optblocklength_blppw2009_dhat(a::BootStationary, xvar::Float64, kernelcovvec::Vector{Float64})::Float64) = 2 * (xvar + 2*sum(kernelcovvec))^2 #note, in expression (1 + 2*sum(kernelCovVec)), "2*" is because sum is from -M to M, but is symmetric around 0. "1+" is the zero term of the sum which is simply equal to unity.
(optblocklength_blppw2009_dhat(a::T, xvar::Float64, kernelcovvec::Vector{Float64})::Float64) where {T<:Union{BootCircular,BootMoving,BootIID,BootNoOverlap}} = (4/3) * (xvar + 2*sum(kernelcovvec))^2 #note, in expression (1 + 2*sum(kernelCovVec)), "2*" is because sum is from -M to M, but is symmetric around 0. "1+" is the zero term of the sum which is simply equal to unity.
#Block length selection method of Paparoditis, Politis (2002) "The Tapered Block Bootstrap for General Statistics From Stationary Sequences"
# function optblocklength(x::AbstractVector{<:Number}, blm::BLPP2002{P2003,Tkf}, bootmethod::Tbm)::Float64 where {Tbm<:BootMethod,Tkf<:KernelFunctionMethod}
# length(x) < 3 && error("You must have at least 3 observations to estimate block length")
# Tbm != BootTapered && println("WARNING: Optimal parameter values in pp2002 block length procedure are unknown for bootstrap method $(bootmethod).")
# (M, xVar, covVec) = blocklength_ma_and_cor(x)
# kernelCovVec = blocklength_kernel_cov(covVec, M)
# deltaUnknown = (xVar + 2*sum(kernelCovVec))^2 #Unknown parameter in Delta expression, section 4, Paparoditis, Politis (2002). (note, "1+" is for k=0 term in summation, "2*" is because summation is from -M to M, but is symmetric about 0)
# gammaUnknown = 0.0 #Unknown parameter in Gamma expression, start of section 4
# for k = 1:M
# gammaUnknown += 2 * k^2 * kernelCovVec[k] #note, "2*" is because sum is from -M and M, but is symmetric about 0. Note, 0 term in sum vanishes since k=0 -> k^2=0
# end
# (gammaHat, deltaHat) = optblocklength_blpp2002_param(blm.kernelfunction, gammaUnknown, deltaUnknown)
# #Equation 20 from Paraproditis, Politis (2002)
# blocklength = (4 * gammaHat^2 * length(x) / deltaHat)^(1/5)
# blocklength = min(blocklength, ceil(min(3*sqrt(length(x)), length(x) / 3))) #Enforce upper bound on block length suggested by Patton
# blocklength = max(blocklength, 1.0)
# return blocklength
# end
# (optblocklength_blpp2002_param(kf::KernelTrap, gu::Float64, du::Float64)::Float64) = (-5.45*gu, 1.099*du) #-5.45 = (1/2) * -10.9 = (1/2) * (w*w)''(0) / (w*w)(0) [OPTIMAL TRAP VALUES], see Paparoditis, Politis (2002), 1.099 = 2 * 0.5495 = 2 * int_{-1}^{1} ((w*w)^2(x) / (w*w)^2(0)) dx [OPTIMAL TRAP VALUES], see Paparoditis, Politis (2002)
# (optblocklength_blpp2002_param(kf::KernelSmooth, gu::Float64, du::Float64)::Float64) = (-5.175*gu, 1.1312*deltaUnknown) #-5.175 = (1/2) * -10.35 = (1/2) * (w*w)''(0) / (w*w)(0) [OPTIMAL SMOOTH VALUES], see Paparoditis, Politis (2002), 1.1312 = 2 * 0.5656 = 2 * int_{-1}^{1} ((w*w)^2(x) / (w*w)^2(0)) dx [OPTIMAL SMOOTH VALUES], see Paparoditis, Politis (2002)
# (optblocklength_blpp2002_param(kf::T, gu::Float64, du::Float64)::Float64) where {T} = error("Optimal parameters not known for input kernel function: $(kf)")
(optblocklength(x::AbstractVector{<:Number}, bi::BootInput{Tbm,BLDummy})::Float64) where {Tbm<:BootMethod} = error("Logic fail. It should not have been possible to call this method. Please file an issue with full stacktrace.")
(optblocklength(x::AbstractVector{<:Number}, bi::BootInput)::Float64) = optblocklength(x, bi.blocklengthmethod, bi.bootmethod)
#Multivariate dataset wrapper
(optblocklength(data, bl::Tbl, bm::Tbm, f::Tf)::Float64) where {Tbl<:BlockLengthMethod,Tbm<:BootMethod,Tf<:Function} = f([ optblocklength(local_get_var(data, i), bl, bm) for i = 1:num_var(data) ])
(optblocklength(data, bi::BootInput)::Float64) = bi.fblocklengthcombine([ optblocklength(local_get_var(data, i), bi) for i = 1:num_var(data) ])
#Keyword input wrapper
(optblocklength(data ; kwargs...)::Float64) = optblocklength(data, BootInput(data ; kwargs...))
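#Example usage (an illustrative sketch only, not executed as part of the package):
# julia> using DependentBootstrap
# julia> x = randn(1000)
# julia> bl = optblocklength(x, bootmethod=:stationary) #Float64 estimate of the optimal block length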
#These two functions are used by several of the block-length selection procedures
function blocklength_ma_and_cor(x::AbstractVector{T})::Tuple{Int, Float64, Vector{Float64}} where {T<:Number}
(M, xVar, covVec) = bandwidth_politis_2003(x)
if M > 0.5 * length(x)
println("WARNING: Bandwidth in parameter estimation section of blocklength forced to half total number of observations. Data may contain excessive dependence.")
M = Int(round(0.5 * length(x)))
end
M < 2 && (M = 2) #Even though M output of bandwidth function is always greater than 2, the above check for excessively large M can result in M being reduced below 2 (admittedly only in very unusual circumstances)
length(covVec) < M && append!(covVec, autocov(x, length(covVec)+1:M)) #Get any additional autocovariances that we might need
return(M, xVar, covVec)
end
function blocklength_kernel_cov(covVec::AbstractVector{Float64}, M::Int)::Vector{Float64}
length(covVec) < M && error("Error in blocklength_kernel_cov likely caused by logic fail in blocklength_ma_and_cor. Please lodge a github issue, preferably with reproducible example.")
kernelCovVec = Float64[ kernel_politis_2003_flat_top(k / M) * covVec[k] for k = 1:M ]
return(kernelCovVec)
end
#Note, the following non-exported function is called in the ForecastEval package, so if you alter the function name you will
#need to adjust that package too.
"""
bandwidth_politis_2003(x::AbstractVector{T})::Tuple{Int, Float64, Vector{Float64}} where {T<:Number}
Implements the methodology from Politis (2003) "Adaptive Bandwidth Choice" to obtain a data-driven bandwidth estimate.
Return tuple is, in order, the bandwidth estimate, the variance of x, and the autocorrelations used to get the bandwidth estimate.
Note, most users won't be interested in the second and third output, but sometimes this routine will be called by other
functions that need these terms, so they are returned to avoid duplicate computation.
"""
function bandwidth_politis_2003(x::AbstractVector{T})::Tuple{Int, Float64, Vector{Float64}} where {T<:Number}
length(x) < 2 && error("Input data must have at least two observations")
adjustmentTerm = 1.0 #This term is used by me for debugging. It serves no statistical purpose.
politis_c = 2.0 #This value is again recommended in Politis (2003)
K = max(5, Int(ceil(sqrt(log(10, length(x)))))) #This value is recommended in Politis (2003)
mHat = 1
corVec = Float64[]
append!(corVec, autocor(x, 1:min(20, length(x)-1))) #I add autocorrelations in blocks as this is more efficient than doing it one at a time
corBound = politis_c * sqrt(log(10, length(x)) / length(x)) #Note, use of log base 10 is deliberate and recommended in Politis (2003)
KCounter = 0
mHatFound = false
for k = 1:length(x)-2
#Add one to counter if bound is satisfied, otherwise reset counter to 0
abs(corVec[k]) < corBound ? (KCounter += 1) : (KCounter = 0)
if KCounter >= K #We found K autocorrelations in a row that satisfy the bound, so break.
mHat = k - K + 1
mHatFound = true
break
end
#If we run out of autocorrelations to check, add another block of them to corVec
k == length(corVec) && append!(corVec, autocor(x, length(corVec)+1:min(length(corVec)+20, length(x)-1)))
end
mHatFound == false && (mHat = length(x) - 1) #Bound mHat in the case where we fail to hit the break condition
M = Int(ceil(adjustmentTerm * 2 * mHat)) #"2*" is a standard rule, see e.g. Politis (2003).
M > length(x) - 1 && (M = length(x) - 1) #Apply upper bound to M
M < 2 && (M = 2) #Apply lower bound to M
xVar = ((length(x)-1) / length(x)) * var(x) #Used to scale autocorrelations to autocovariances for return argument
return (M, xVar, xVar * corVec)
end
"""
kernel_politis_2003_flat_top(x::Float64)::Float64
Implements the flat-top kernel function discussed in Politis (2003) "Adaptive Bandwidth Choice"
"""
function kernel_politis_2003_flat_top(x::Float64)::Float64
x_abs = abs(x)
x_abs <= 0.5 && return 1.0
x_abs <= 1.0 && return (2.0 * (1 - x_abs))
return 0.0
end
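#Worked example of the flat-top shape: the flat region gives kernel_politis_2003_flat_top(0.25) == 1.0,
#the linear taper gives kernel_politis_2003_flat_top(0.75) == 2*(1 - 0.75) == 0.5,
#and anything with |x| > 1 gives kernel_politis_2003_flat_top(1.5) == 0.0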
| DependentBootstrap | https://github.com/colintbowers/DependentBootstrap.jl.git |
|
[
"MIT"
] | 1.1.4 | b494a8c911f2492a2e9085e1b8653154503c2c62 | code | 5107 |
#Local function for checking a BootInput is valid for a call to dbootvecinds
function check_bi_for_dbootvecinds(bi::BootInput)::Bool
bi.numobs < 1 && error("Number of observations must be strictly positive")
bi.numresample < 1 && error("Number of resamples must be strictly positive")
bi.numobsperresample < 1 && error("Number of observations per resample must be strictly positive")
bi.blocklength <= 0.0 && error("Block length must be strictly positive")
isnan(bi.blocklength) && error("Block length is set to NaN")
isinf(bi.blocklength) && error("Block length is infinite")
return true
end
"""
dbootinds_one(bi::BootInput)::Vector{Int}
dbootinds_one(data::T; kwargs...)::Vector{Int}
Returns a single resampling index that, when used to index the original dataset,
will provide a single resampled dataset.
A keyword method that calls the keyword constructor for BootInput is also provided. Please
use ?BootInput at the REPL for more detail on feasible keywords.
"""
(dbootinds_one(bi::BootInput{BootIID})::Vector{Int}) = rand(1:bi.numobs, bi.numobsperresample)
function dbootinds_one(bi::BootInput{BootStationary})::Vector{Int}
bi.blocklength <= 1.0 && return rand(1:bi.numobs, bi.numobsperresample)
inds = zeros(Int, bi.numobsperresample)
geo1 = Geometric(1 / bi.blocklength)
(c, geodraw) = (1, 1)
for n = 1:bi.numobsperresample
if c == geodraw #Start a new block
inds[n] = rand(1:bi.numobs)
geodraw = rand(geo1) + 1
c = 1
else #Next obs in existing block
inds[n-1] == bi.numobs ? (inds[n] = 1) : (inds[n] = inds[n-1] + 1)
c += 1
end
end
return inds
end
function dbootinds_one(bi::BootInput{BootMoving})::Vector{Int}
bl = ceil(Int, bi.blocklength)
bl == 1 && return rand(1:bi.numobs, bi.numobsperresample)
inds = zeros(Int, bi.numobsperresample)
blockstart_ub = max(1, bi.numobs-bl+1)
for n = 1:bl:bi.numobsperresample
inds[n] = rand(1:blockstart_ub) #Start of block
for s = n+1:min(n+bl-1, bi.numobsperresample) #Iterate through block (use of min avoids bounds error)
inds[s] = inds[s-1] + 1
end
end
return inds
end
function dbootinds_one(bi::BootInput{BootCircular})::Vector{Int}
bl = ceil(Int, bi.blocklength)
bl == 1 && return rand(1:bi.numobs, bi.numobsperresample)
inds = zeros(Int, bi.numobsperresample)
for n = 1:bl:bi.numobsperresample
inds[n] = rand(1:bi.numobs) #Start of block
for s = n+1:min(n+bl-1, bi.numobsperresample) #Iterate through block (use of min avoids bounds error)
inds[s-1] == bi.numobs ? (inds[s] = 1) : (inds[s] = inds[s-1] + 1)
end
end
return inds
end
function dbootinds_one(bi::BootInput{BootNoOverlap})::Vector{Int}
bl = ceil(Int, bi.blocklength)
bl == 1 && return rand(1:bi.numobs, bi.numobsperresample)
inds = zeros(Int, bi.numobsperresample)
blockstart_ub = max(1, bi.numobs-bl+1)
blockstartvalues = collect(1:bl:blockstart_ub) #Build valid set of start indices for any block
length(blockstartvalues) == 1 && error("Not enough observations to perform non-overlapping block bootstrap given block length: num obs = $(bi.numobs), block length = $(bl)")
(blockstartvalues[end] + bl - 1 > bi.numobs) && pop!(blockstartvalues)
for n = 1:bl:bi.numobsperresample
inds[n] = blockstartvalues[rand(1:length(blockstartvalues))] #Start of block
for s = n+1:min(n+bl-1, bi.numobsperresample) #Iterate through block (use of min avoids bounds error)
inds[s] = inds[s-1] + 1
end
end
return inds
end
#(dbootinds_one(bi::BootInput{BootTapered})::Vector{Int}) = error("Routines for the tapered block bootstrap are currently not completed. Users interested in contributing should check the package github page.")
(dbootinds_one(bi::BootInput{BootDummy})::Vector{Int}) = error("Logic fail. It should not have been possible to call this method. Please file an issue with full stacktrace.")
(dbootinds_one(data, bi::BootInput)::Vector{Int}) = dbootinds_one(bi)
(dbootinds_one(data ; kwargs...)::Vector{Int}) = dbootinds_one(data, BootInput(data ; kwargs...))
"""
dbootinds(data::T ; bi::BootInput)::Vector{Vector{Int}}
dbootinds(data::T ; kwargs...)::Vector{Vector{Int}}
Each inner vector of the returned Vector{Vector{Int}} provides indices that, when used to
index the original dataset, will provide a single resampled dataset.
A keyword method that calls the keyword constructor for BootInput is also provided. Please
use ?BootInput at the REPL for more detail on feasible keywords.
Please use dbootinds_one if you only want to obtain a single Vector{Int} resampling index.
"""
(dbootinds(bi::BootInput)::Vector{Vector{Int}}) = check_bi_for_dbootvecinds(bi) ? [ dbootinds_one(bi) for n = 1:bi.numresample ] : error("Logic fail in check_bi_for_dbootvecinds. Please file an issue.")
(dbootinds(data, bi::BootInput)::Vector{Vector{Int}}) = dbootinds(bi)
(dbootinds(data ; kwargs...)::Vector{Vector{Int}}) = dbootinds(data, BootInput(data ; kwargs...))
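#Example usage (an illustrative sketch only):
# julia> x = randn(200)
# julia> inds = dbootinds(x, blocklength=5, numresample=100, bootmethod=:moving)
# julia> x[inds[1]] #first of the 100 resampled datasets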
| DependentBootstrap | https://github.com/colintbowers/DependentBootstrap.jl.git |
|
[
"MIT"
] | 1.1.4 | b494a8c911f2492a2e9085e1b8653154503c2c62 | code | 8675 |
"num_obs <- Internal function used to determine the number of observations in the input dataset"
(num_obs(data::AbstractVector{T})::Int) where {T} = length(data)
(num_obs(data::AbstractMatrix{T})::Int) where {T} = size(data, 1)
(num_obs(data::Vector{Vector{T}})::Int) where {T} = (isempty(data) || any(length.(data) .!= length(data[1]))) ? error("Input dataset is empty, or inner vectors of input dataset do not have matching length: $(length.(data))") : length(data[1])
(num_obs(data::DataFrames.DataFrame)::Int) = size(data, 1)
(num_obs(data::TimeSeries.TimeArray{T,1})::Int) where {T} = size(data, 1)
(num_obs(data::TimeSeries.TimeArray{T,2})::Int) where {T} = size(data, 1)
"num_var <- Internal function used to determine the number of variables in the input dataset"
(num_var(data::AbstractVector{T})::Int) where {T} = 1
(num_var(data::AbstractMatrix{T})::Int) where {T} = size(data, 2)
(num_var(data::Vector{Vector{T}})::Int) where {T} = isempty(data) ? error("Input dataset is empty") : length(data)
(num_var(data::DataFrames.DataFrame)::Int) = size(data, 2)
(num_var(data::TimeSeries.TimeArray{T,1})::Int) where {T} = 1
(num_var(data::TimeSeries.TimeArray{T,2})::Int) where {T} = size(data, 2)
"local_get_var <- Internal function used to get the ith variable in dataset"
(local_get_var(data::AbstractVector{T}, i::Int)::Vector{T}) where {T} = i == 1 ? data[:] : error("Invalid index $(i) given data $(typeof(data))")
(local_get_var(data::AbstractMatrix{T}, i::Int)::Vector{T}) where {T} = (1 <= i <= size(data,2)) ? data[:, i] : error("Invalid index $(i) given data $(typeof(data)) with number of columns $(size(data, 2))")
(local_get_var(data::Vector{Vector{T}}, i::Int)::Vector{T}) where {T} = (1 <= i <= length(data)) ? data[i] : error("Invalid index $(i) given data $(typeof(data)) with outer length: $(length(data))")
(local_get_var(data::DataFrames.DataFrame, i::Int)) = (1 <= i <= num_var(data)) ? data[:, i] : error("Invalid index $(i) given data $(typeof(data)) with number of columns $(num_var(data))")
(local_get_var(data::TimeSeries.TimeArray{T,1}, i::Int)::Vector{T}) where {T} = i == 1 ? values(data) : error("Invalid index $(i) given data $(typeof(data)) with number of columns $(num_var(data))")
(local_get_var(data::TimeSeries.TimeArray{T,2}, i::Int)::Vector{T}) where {T} = (1 <= i <= num_var(data)) ? values(data)[:,i] : error("Invalid index $(i) given data $(typeof(data)) with number of columns $(num_var(data))")
"local_get_index <- Internal function used to resample the dataset data using the input resampling index inds"
(local_get_index(data::AbstractVector{T}, inds::Vector{Int})::Vector{T}) where {T} = data[inds]
(local_get_index(data::AbstractMatrix{T}, inds::Vector{Int})::Matrix{T}) where {T} = data[inds, :]
(local_get_index(data::Vector{Vector{T}}, inds::Vector{Int})::Vector{Vector{T}}) where {T} = [ y[inds] for y in data ]
(local_get_index(data::DataFrames.DataFrame, inds::Vector{Int})::DataFrames.DataFrame) = data[inds, :]
(local_get_index(data::TimeSeries.TimeArray{T,1}, inds::Vector{Int})::TimeSeries.TimeArray{T,1}) where {T} = TimeSeries.TimeArray(TimeSeries.timestamp(data), values(data)[inds], TimeSeries.colnames(data) ; unchecked=true)
(local_get_index(data::TimeSeries.TimeArray{T,2}, inds::Vector{Int})::TimeSeries.TimeArray{T,2}) where {T} = TimeSeries.TimeArray(TimeSeries.timestamp(data), values(data)[inds, :], TimeSeries.colnames(data) ; unchecked=true)
"""
dbootdata_one(data::T, bi::BootInput)::T
dbootdata_one(data::T; kwargs...)::T
Get a single resampled dataset of the input data using the dependent bootstrap
methodology defined in BootInput.
A keyword method that calls the keyword constructor for BootInput is also provided. Please
use ?BootInput at the REPL for more detail on feasible keywords.
Note, the output type will always be the same as the type of the input data.
"""
(dbootdata_one(data::Td, bi::BootInput)::Td) where {Td} = local_get_index(data, dbootinds_one(bi))
(dbootdata_one(data::Td ; kwargs...)::Td) where {Td} = dbootdata_one(data, BootInput(data ; kwargs...))
# function dbootdata_one_infl(data_infl::Td, bi::BootInput{BootTapered})::TD where {Td}
# dataout = apply_inds_to_data(data_infl, dbootinds_one(bi))
# dboot_weight!(dataout, bi)
# return dataout
# end
# function (dbootdata_one(data::Td, bi::BootInput{BootTapered})::Td) where {Td}
# return dbootdata_one_infl(apply_influence_function(data), bi, bm)
# end
"""
dbootdata(data::T , bi::BootInput)::Vector{T}
dbootdata(data::T ; kwargs...)::Vector{T}
Get the resampled datasets of the input data using the dependent bootstrap
methodology defined in BootInput.
A keyword method that calls the keyword constructor for BootInput is also provided. Please
use ?BootInput at the REPL for more detail on feasible keywords.
Note, this function should always have output type Vector{T}.
"""
(dbootdata(data::Td, bi::BootInput)::Vector{Td}) where {Td} = [ local_get_index(data, dbootinds_one(bi)) for j = 1:bi.numresample ]
(dbootdata(data::Td ; kwargs...)::Vector{Td}) where {Td} = dbootdata(data, BootInput(data ; kwargs...))
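#Example usage (an illustrative sketch only): the output type always matches the input type
# julia> y = randn(100)
# julia> bs = dbootdata(y, blocklength=4, numresample=5, bootmethod=:circular)
# julia> typeof(bs) #Vector{Vector{Float64}}, one inner vector per resample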
# function dbootdata(data::Td, bi::BootInput{BootTapered})::Vector{Td} where {Td}
# data_infl = apply_influence_function(data)
# return [ dbootdata_one_infl(data_infl, bi, bm) for j = 1:bi.numresample ]
# end
"""
dbootlevel1(data::T1, bi::BootInput)
dbootlevel1(data::T1; kwargs...)
Get the level 1 bootstrapped statistics associated with dataset in data, and bootstrap methodology in BootInput.
A keyword method that calls the keyword constructor for BootInput is also provided. Please
use ?BootInput at the REPL for more detail on feasible keywords.
Note, the return type is determined by bi.flevel1, which must be a function that accepts T1,
ie typeof(data), as input. It may return any output type T2, as long as bi.flevel2 will
accept Vector{T2} as input.
For example, if data is a Vector{<:Number} then bi.flevel1 might be the function mean,
which in this case will return Float64, so bi.flevel2 must be some function that can
accept Vector{Float64} as input.
A more complicated example: if data is Matrix{<:Number} then bi.flevel1 might be the anonymous
function x->mean(x,dims=1), which in this case will return a single row Matrix{Float64}, and
so bi.flevel2 must be some function that can accept Vector{Matrix{Float64}} as input.
"""
dbootlevel1(data, bi::BootInput) = [ bi.flevel1(dbootdata_one(data, bi)) for j = 1:bi.numresample ]
dbootlevel1(data ; kwargs...) = dbootlevel1(data, BootInput(data ; kwargs...))
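#Example usage (an illustrative sketch only): bootstrap distribution of the sample median
# julia> using DependentBootstrap, Statistics
# julia> z = randn(500)
# julia> stats = dbootlevel1(z, flevel1=median) #Vector{Float64} of length numresample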
"""
dboot(data, bi::BootInput)
dboot(data ; kwargs...)
Get the level 2 bootstrapped statistics associated with dataset in data, and bootstrap methodology in BootInput.
A keyword method that calls the keyword constructor for BootInput is also provided. Please
use ?BootInput at the REPL for more detail on feasible keywords.
Note, the return type of the output will be determined by bi.flevel2, which must be a function that accepts
Vector{T}, where T is the output type of bi.flevel1.
For example, if data is a Vector{<:Number} and bi.flevel1 is mean, then in this case, bi.flevel1 will return
Float64, and so bi.flevel2 must be some function that accepts Vector{Float64} as input (and can have any output
type.)
Alternatively, bi.flevel2 could be the anonymous function (x -> quantile(x, [0.025, 0.975])), in which case
the input should be Vector{Float64}, and so bi.flevel1 should return Float64. Note, the output of bi.flevel2
in this case will be a 2-element Vector{Float64} with elements corresponding bootstrapped 95% confidence interval
for the level1 statistic of the input dataset
"""
dboot(data, bi::BootInput) = bi.flevel2(dbootlevel1(data, bi))
dboot(data ; kwargs...) = dboot(data, BootInput(data ; kwargs...))
"dbootlevel2 <- Identical to the dboot function. This function is only included for naming consistency with dbootlevel1"
dbootlevel2(data, bi::BootInput) = dboot(data, bi)
dbootlevel2(data ; kwargs...) = dbootlevel2(data, BootInput(data ; kwargs...))
"dbootvar <- Identical to dboot but with the level 2 statistic set to variance"
dbootvar(data ; kwargs...) = dboot(data, BootInput(data ; flevel2=var, kwargs...))
"dbootconf <- Identical to dboot but with the level 2 statistic set to a confidence interval with width determined by keyword alpha. Default alpha=0.05 corresponds to a 95% confidence interval."
dbootconf(data ; alpha::Float64=0.05, kwargs...) = (0.0 < alpha < 0.5) ? dboot(data, BootInput(data ; flevel2=(x -> quantile(x, [alpha/2, 1-(alpha/2)])), kwargs...)) : error("Invalid alpha of $(alpha) for confidence interval. alpha must lie on the interval (0.0, 0.5)")
| DependentBootstrap | https://github.com/colintbowers/DependentBootstrap.jl.git |
|
[
"MIT"
] | 1.1.4 | b494a8c911f2492a2e9085e1b8653154503c2c62 | code | 2695 |
#Local function used to transform data via appropriate influence functions for the case where bootstrap method is tapered block
function apply_influence_function(x::Vector{T}, bi::BootInput)::Vector{Float64} where {T<:Number}
    if bi.flevel1 == mean
        x_if = x .- mean(x)
    elseif bi.flevel1 == sum
        x_if = length(x) .* (x .- mean(x))
    else
        error("Tapered block bootstrap only implemented for a limited number of cases with known influence functions")
    end
    return(x_if)
end
(apply_influence_function(x::Vector{Vector{T}}, bi::BootInput)::Vector{Vector{Float64}}) where {T<:Number} = Vector{Float64}[ apply_influence_function(x[k], bi) for k = 1:length(x) ]
#Local function used to weight data for the case where bootstrap method is tapered block
function dboot_kernel_weights(bi::BootInput)::Vector{Float64}
bL = Int(ceil(bi.blocklength))
kernelInput = Float64[ (1 / bL) * (n - 0.5) for n = 1:bL ]
kernelWeight = kernel_func_pp2002(kernelInput, bi.bootmethod)
normTerm = sqrt(bL) / norm(kernelWeight, 2)
kernelWeight .*= normTerm
return(kernelWeight)
end
function dboot_weight!(x::Vector{Vector{T}}, bi::BootInput)::Vector{Vector{Float64}} where {T<:Number}
bL = Int(ceil(bi.blocklength))
bL <= 1 && return(x)
    w = dboot_kernel_weights(bi)
length(w) != bL && error("Logic fail. Incorrect length output from dboot_kernel_weights function. Please file an issue.")
(num_repeat, num_remain) = divrem(size(x, 1), bL)
wLong = vcat(repeat(w, outer=num_repeat), w[1:num_remain])
for n = 1:length(x)
x[n] = wLong .* x[n]
end
return(x)
end
#Local function for the two kernel functions proposed in Paparoditis and Politis (2002) "The tapered block bootstrap for general statistics from stationary sequences"
function kernel_func_pp2002_trap(x::Float64)::Float64
p = 0.43 #Optimal value from PP (2002)
x < 0 && return(0.0)
x < p && return(x / p)
x < 1 - p && return(1.0)
x < 1 && return((1 - x) / p)
return(0.0)
end
kernel_func_pp2002_trap(x::Vector{Float64})::Vector{Float64} = Float64[ kernel_func_pp2002_trap(x[k]) for k = 1:length(x) ]
function kernel_func_pp2002_smooth(x::Float64)::Float64
p = 1.3 #Optimal value from PP (2002)
(0.0 <= x <= 1.0) && return(1 - abs(2*x - 1)^p)
return(0.0)
end
kernel_func_pp2002_smooth(x::Vector{Float64})::Vector{Float64} = Float64[ kernel_func_pp2002_smooth(x[k]) for k = 1:length(x) ]
function kernel_func_pp2002(x::Vector{Float64}, bm::BootTapered)::Vector{Float64}
bm.kernelfunction == :trap && return(kernel_func_pp2002_trap(x))
bm.kernelfunction == :smooth && return(kernel_func_pp2002_smooth(x))
error("Invalid kernel function of $(bm.kernelfunction)")
end
| DependentBootstrap | https://github.com/colintbowers/DependentBootstrap.jl.git |
|
[
"MIT"
] | 1.1.4 | b494a8c911f2492a2e9085e1b8653154503c2c62 | code | 18152 |
#Abstract supertypes
"BootMethod <- Abstract supertype for all dependent bootstrap methods"
abstract type BootMethod end
"BlockLengthMethod <- Abstract supertype for all block length selection methods"
abstract type BlockLengthMethod end
"BandwidthMethod <- Abstract supertype for all bandwidth selection methods"
abstract type BandwidthMethod end
"KernelFunctionMethod <- Abstract supertype for all kernel function methods"
abstract type KernelFunctionMethod end
#Kernel function types
"KernelDummy <- Dummy type for kernel functions"
struct KernelDummy <: KernelFunctionMethod ; end
struct KernelTrap <: KernelFunctionMethod ; end
struct KernelSmooth <: KernelFunctionMethod ; end
Base.show(io::IO, x::KernelDummy) = print(io, "Dummy kernel function method")
Base.show(io::IO, x::KernelTrap) = print(io, "Trapezoidal kernel function method from Paparoditis, Politis (2002)")
Base.show(io::IO, x::KernelSmooth) = print(io, "Smooth kernel function method from Paparoditis, Politis (2002)")
kernel_function_dict_input = Pair{Symbol,KernelFunctionMethod}[
:dummy => KernelDummy(),
:trap => KernelTrap(),
:smooth => KernelSmooth()
]::Vector{Pair{Symbol,KernelFunctionMethod}}
"KERNEL_FUNCTION_DICT <- Dictionary for converting string or symbol inputs into kernel function types"
const KERNEL_FUNCTION_DICT = Dict{Union{Symbol,String},KernelFunctionMethod}()::Dict{Union{Symbol,String},KernelFunctionMethod}
sizehint!(KERNEL_FUNCTION_DICT, 2*length(kernel_function_dict_input))
for kf in kernel_function_dict_input ; KERNEL_FUNCTION_DICT[kf[1]] = kf[2] ; end
for kf in kernel_function_dict_input ; KERNEL_FUNCTION_DICT[string(kf[1])] = kf[2] ; end
#Bootstrap method types
"BootDummy <- Dummy type used within the module. Should never be seen by the end user"
struct BootDummy <: BootMethod ; end
"BootIID <- Type for using multiple dispatch to get the IID boostrap"
struct BootIID <: BootMethod ; end
"BootStationary <- Type for using multiple dispatch to get the stationary bootstrap"
struct BootStationary <: BootMethod ; end
"BootMoving <- Type for using multiple dispatch to get the moving blocks bootstrap"
struct BootMoving <: BootMethod ; end
"BootNoOverlap <- Type for using multiple dispatch to get the non-overlapping blocks bootstrap"
struct BootNoOverlap <: BootMethod ; end
"BootCircular <- Type for using multiple dispatch to get the circular blocks bootstrap"
struct BootCircular <: BootMethod ; end
# "BootTapered <- Type for using multiple dispatch to get the tapered block bootstrap"
# struct BootTapered{Tkf<:KernelFunctionMethod} <: BootMethod
# kernelfunction::Tkf
# function BootTapered(kf::Tkf) where {Tkf}
# new{Tkf}(kf)
# end
# end
# BootTapered()::BootTapered = BootTapered(KernelTrap())
Base.show(io::IO, bm::BootDummy) = print(io, "Dummy bootstrap method")
Base.show(io::IO, bm::BootIID) = print(io, "IID bootstrap")
Base.show(io::IO, bm::BootStationary) = print(io, "Stationary bootstrap")
Base.show(io::IO, bm::BootMoving) = print(io, "Moving block bootstrap")
Base.show(io::IO, bm::BootNoOverlap) = print(io, "Non-overlapping block bootstrap")
Base.show(io::IO, bm::BootCircular) = print(io, "Circular block bootstrap")
#Base.show(io::IO, bm::BootTapered) = print(io, "Tapered block bootstrap")
boot_method_dict_input = Pair{Symbol, BootMethod}[
:iid => BootIID(),
:efron => BootIID(),
:stationary => BootStationary(),
:movingblock => BootMoving(),
:moving => BootMoving(),
:nonoverlappingblock => BootNoOverlap(),
:nooverlap => BootNoOverlap(),
:circularblock => BootCircular(),
:circular => BootCircular()]::Vector{Pair{Symbol,BootMethod}}
"BOOT_METHOD_DICT <- Dictionary for converting string or symbol inputs into bootstrap methods"
const BOOT_METHOD_DICT = Dict{Union{Symbol,String},BootMethod}()::Dict{Union{Symbol,String},BootMethod}
sizehint!(BOOT_METHOD_DICT, 2*length(boot_method_dict_input))
for bm in boot_method_dict_input ; BOOT_METHOD_DICT[bm[1]] = bm[2] ; end
for bm in boot_method_dict_input ; BOOT_METHOD_DICT[string(bm[1])] = bm[2] ; end
#bandwidth method types
"P2003 <- Type for using multiple dispatch to get the bandwidth selection procedure of Politis (2003)"
struct P2003 <: BandwidthMethod ; end
Base.show(io::IO, bw::P2003) = print(io, "Bandwidth selection of Politis (2003)")
bandwidth_method_dict_input = Pair{Symbol,BandwidthMethod}[
:politis2003 => P2003(),
:p2003 => P2003()
]::Vector{Pair{Symbol,BandwidthMethod}}
"BANDWIDTH_METHOD_DICT <- Dictionary for converting string or symbol inputs into bandwidth methods"
const BANDWIDTH_METHOD_DICT = Dict{Union{Symbol,String},BandwidthMethod}()::Dict{Union{Symbol,String},BandwidthMethod}
sizehint!(BANDWIDTH_METHOD_DICT, 2*length(bandwidth_method_dict_input))
for bw in bandwidth_method_dict_input ; BANDWIDTH_METHOD_DICT[bw[1]] = bw[2] ; end
for bw in bandwidth_method_dict_input ; BANDWIDTH_METHOD_DICT[string(bw[1])] = bw[2] ; end
#blocklength method types
"BLDummy <- Dummy type for block length method"
struct BLDummy <: BlockLengthMethod ; end
# "BLPP2002 <- Type for using multiple dispatch to get the block length selection procedure of Paparoditis and Politis (2002)"
# struct BLPP2002{Tbw<:BandwidthMethod,Tkf<:KernelFunctionMethod} <: BlockLengthMethod
# bandwidthmethod::Tbw
# kernelfunction::Tkf
# end
"BLPPW2009 <- Type for using multiple dispatch to get the block length selection procedure of Patton, Politis, and White (2009)"
struct BLPPW2009{Tbw<:BandwidthMethod} <: BlockLengthMethod
bandwidthmethod::Tbw
end
BLPPW2009()::BLPPW2009{P2003} = BLPPW2009(P2003())
# Base.show(io::IO, bl::BLPP2002) = print(io, "Block length selection of Paparoditis and Politis (2002)")
Base.show(io::IO, bl::BLPPW2009) = print(io, "Block length selection of Patton, Politis, and White (2009)")
blocklength_method_dict_input = Pair{Symbol,BlockLengthMethod}[
:ppw2009 => BLPPW2009(P2003())]::Vector{Pair{Symbol,BlockLengthMethod}}
"BLOCKLENGTH_METHOD_DICT <- Dictionary for converting string or symbol inputs into bandwidth methods"
const BLOCKLENGTH_METHOD_DICT = Dict{Union{Symbol,String},BlockLengthMethod}()::Dict{Union{Symbol,String},BlockLengthMethod}
sizehint!(BLOCKLENGTH_METHOD_DICT, 2*length(blocklength_method_dict_input))
for bl in blocklength_method_dict_input ; BLOCKLENGTH_METHOD_DICT[bl[1]] = bl[2] ; end
for bl in blocklength_method_dict_input ; BLOCKLENGTH_METHOD_DICT[string(bl[1])] = bl[2] ; end
"bootmethod_to_blocklengthmethod <- Convert a bootstrap method to the most appropriate block length method"
(bootmethod_to_blocklengthmethod(bm::T)::BLPPW2009{P2003}) where {T<:Union{BootStationary,BootMoving,BootCircular,BootNoOverlap,BootIID}} = BLPPW2009(P2003())
#(bootmethod_to_blocklengthmethod(bm::BootTapered)::BLPP2002{P2003,KernelDummy}) = BLPP2002(P2003(), KernelDummy())
"""
BootInput
Core type that defines all parameters needed to perform a bootstrap procedure. The
vast majority of users should use the keyword argument constructor that has the
method signature:
BootInput(data ; kwargs...)
where data is the dataset to be bootstrapped, and kwargs denotes a set of keyword arguments
(defined below) that are used for every exported function in the DependentBootstrap package.
The following keyword arguments and default values follow: \n
- blocklength <- Block length for bootstrapping procedure. Default value is 0. Set to <= 0 to auto-estimate the optimal block length from the dataset. Float64 inputs allowed.
- numresample <- Number of times to resample the input dataset. Default value is the module constant NUM_RESAMPLE, currently set to 1000.
- bootmethod <- Bootstrapping methodology to use. Default value is the Symbol :stationary (for the stationary bootstrap).
- blocklengthmethod <- Block length selection procedure to use if user wishes to auto-estimate the block length. Default value is the Symbol :ppw2009 (use the method described in Patton, Politis, and White (2009)).
- flevel1 <- A function that converts the input dataset to the estimator that the user wishes to bootstrap. Default value is the sample mean.
- flevel2 <- A function that converts a vector of estimators constructed by flevel1 into a distributional parameter. Default value is sample variance.
- numobsperresample <- Number of observations to be drawn (with replacement) per resample. The default value is the number of observations in the dataset (the vast majority of users will want this default value).
- fblocklengthcombine <- A function for converting a Vector{Float64} of estimated blocklengths to a single Float64 blocklength estimate. Default value is median.
The constructor will attempt to convert all provided keyword arguments to appropriate types,
and will notify the user via an error if a supplied keyword argument is not valid.
Note that the bootmethod and blocklengthmethod keyword arguments will accept both
Symbol and String inputs, and will convert them to BootMethod and BlockLengthMethod
types internally. To see a list of acceptable Symbol or String values for the bootmethod and
blocklengthmethod keyword arguments, use: \n
- collect(keys(DependentBootstrap.BOOT_METHOD_DICT))
- collect(keys(DependentBootstrap.BLOCKLENGTH_METHOD_DICT))
respectively. A small proportion of users may need the fine-grained control that comes
from constructing BootMethod and BlockLengthMethod types explicitly and then providing
them to the keyword constructor. These users should use ?BootMethod and ?BlockLengthMethod
at the REPL for more info.
BootInput is not mutable, but the type is near instantaneous to construct, so if a user wishes
to amend a BootInput it is recommended to just construct another one. A special constructor
is provided to facilitate this process that has the method definition: \n
- BootInput(data, bootinput::BootInput ; kwargs...)
where the new BootInput draws its fields from the keyword arguments that are provided, and then
the input BootInput for any keyword arguments that are not provided.
Note that all exported functions in the DependentBootstrap package exhibit the
method signature: \n
- exported_func(data ; kwargs...)
which in practice just wraps the keyword argument constructor for a BootInput, and
then calls the method signature: \n
- exported_func(data, bootinput::BootInput)
"""
struct BootInput{Tbm<:BootMethod,Tbl<:BlockLengthMethod,Tf1<:Function,Tf2<:Function,Tfc<:Function}
numobs::Int
blocklength::Float64
numresample::Int
bootmethod::Tbm
blocklengthmethod::Tbl
flevel1::Tf1
flevel2::Tf2
numobsperresample::Int
fblocklengthcombine::Tfc
function BootInput(numobs::Int, blocklength::Float64, numresample::Int, bootmethod::Tbm, blmethod::Tbl,
flevel1::Tf1, flevel2::Tf2, numobsperresample::Int, fblocklengthcombine::Tfc) where {Tbm<:BootMethod,Tbl<:BlockLengthMethod,Tf1<:Function,Tf2<:Function,Tfc<:Function}
typeof(bootmethod) <: BootDummy && error("bootmethod is set to BootDummy. You should not have been able to accidentally reach this point. Please file an issue.")
numobs < 2 && error("Number of observations input to BootInput must be 2 or greater: $(numobs)")
(isnan(blocklength) || isinf(blocklength)) && error("Invalid blocklength: $(blocklength)")
numresample < 1 && error("Number of resamples must be strictly positive: $(numresample)")
numobsperresample < 1 && error("Number of observations per resample must be strictly positive: $(numobsperresample)")
blocklength <= 0.0 && error("Block length must be strictly positive. BootInput outer constructors that include the dataset as the first argument will automatically estimate the block length if you do not specify it")
Tbm == BootDummy && error("Do not use BootDummy as an input type. It is for internal module use only")
Tbl == BLDummy && error("Do not use BLDummy as an input type. It is for internal module use only")
new{Tbm,Tbl,Tf1,Tf2,Tfc}(numobs, blocklength, numresample, bootmethod, blmethod, flevel1, flevel2, numobsperresample, fblocklengthcombine)
end
end
#BootInput empty constructor
BootInput() = BootInput(2, 1.0, 1, BootIID(), BLPPW2009(), mean, var, 1, identity)
#BootInput constructor that ensures blocklength is auto-detected if need be
function BootInput(data, numobs::Int, blocklength::Number, numresample::Int, bootmethod::Tbm, blocklengthmethod::Tbl, flevel1::Tf1,
flevel2::Tf2, numobsperresample::Int, fblocklengthcombine::Tfc) where {Tbm<:BootMethod,Tbl<:BlockLengthMethod,Tf1<:Function,Tf2<:Function,Tfc<:Function}
blocklength = Float64(blocklength)
Tbm <: BootIID && (blocklength = 1.0)
blocklength <= 0.0 && (blocklength = optblocklength(data, blocklengthmethod, bootmethod, fblocklengthcombine))
return BootInput(numobs, blocklength, numresample, bootmethod, blocklengthmethod, flevel1, flevel2, numobsperresample, fblocklengthcombine)
end
#BootInput constructors that use keyword arguments
function BootInput(data ; blocklength=0, numresample=NUM_RESAMPLE, bootmethod=:stationary, blocklengthmethod=:dummy,
flevel1=mean, flevel2=var, numobsperresample=num_obs(data), fblocklengthcombine=median)
numobs = num_obs(data)
blocklength = bootinput_get_blocklength(blocklength)
numresample = bootinput_get_numresample(numresample)
bootmethod = bootinput_get_bootmethod(bootmethod)
blocklengthmethod = bootinput_get_blocklengthmethod(blocklengthmethod, bootmethod)
flevel1 = bootinput_get_flevel1(flevel1)
flevel2 = bootinput_get_flevel2(flevel2)
numobsperresample = bootinput_get_numobsperresample(numobsperresample)
fblocklengthcombine = bootinput_get_fblocklengthcombine(fblocklengthcombine)
typeof(bootmethod) <: BootIID && (blocklength = 1.0)
blocklength <= 0.0 && (blocklength = optblocklength(data, blocklengthmethod, bootmethod, fblocklengthcombine))
return BootInput(numobs, blocklength, numresample, bootmethod, blocklengthmethod, flevel1, flevel2, numobsperresample, fblocklengthcombine)
end
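#Hedged usage sketch (illustrative values; `data` is any supported dataset type):
# bi = BootInput(data, bootmethod=:stationary, numresample=1000)
# bi.blocklength #auto-estimated, since no positive blocklength was supplied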
#Constructor for building a new BootInput using keyword arguments where possible, and, failing that, the fields
#of an existing BootInput
_db_bi_dummy_f() = error("This function is designed to never be called. It is used as a default value for keyword arguments so as to check whether the user has specified them")
function BootInput(data, bi::BootInput ; blocklength=-9, numresample=-9, bootmethod=:z, blocklengthmethod=:z,
flevel1=_db_bi_dummy_f, flevel2=_db_bi_dummy_f, numobsperresample=-9,
fblocklengthcombine=_db_bi_dummy_f)
blocklength == -9 && (blocklength = bi.blocklength)
numresample == -9 && (numresample = bi.numresample)
bootmethod == :z && (bootmethod = bi.bootmethod)
blocklengthmethod == :z && (blocklengthmethod = bi.blocklengthmethod)
flevel1 == _db_bi_dummy_f && (flevel1 = bi.flevel1)
flevel2 == _db_bi_dummy_f && (flevel2 = bi.flevel2)
numobsperresample == -9 && (numobsperresample = bi.numobsperresample)
fblocklengthcombine == _db_bi_dummy_f && (fblocklengthcombine = bi.fblocklengthcombine)
return BootInput(data, blocklength=blocklength, numresample=numresample, bootmethod=bootmethod, blocklengthmethod=blocklengthmethod,
flevel1=flevel1, flevel2=flevel2, numobsperresample=numobsperresample, fblocklengthcombine=fblocklengthcombine)
end
bootinput_get_blocklength(x::Number)::Float64 = Float64(x)
bootinput_get_blocklength(x) = error("Invalid type for blocklength input. Use a subtype of Number, eg Int or Float64.")
bootinput_get_numresample(x::Number)::Int = Int(x)
bootinput_get_numresample(x) = error("Invalid type for numresample input. Use a subtype of Number, eg Int or Float64.")
function bootinput_get_bootmethod(bootmethod::T) where {T<:Union{Symbol,String}}
bm = get(BOOT_METHOD_DICT, bootmethod, BootDummy())
typeof(bm) <: BootDummy && error("No matching entry found in dictionary for bootmethod input $(bootmethod). Please use collect(keys(DependentBootstrap.BOOT_METHOD_DICT)) at the REPL to see a list of valid keyword arguments.")
return bm
end
bootinput_get_bootmethod(x) = typeof(x) <: BootMethod ? x : error("Invalid type for bootmethod input. Use Symbol, String, or a subtype of BootMethod")
function bootinput_get_blocklengthmethod(blocklengthmethod::T, bootmethod::Tbm) where {T<:Union{Symbol,String},Tbm<:BootMethod}
string(blocklengthmethod) == "dummy" && return bootmethod_to_blocklengthmethod(bootmethod)
blm = get(BLOCKLENGTH_METHOD_DICT, blocklengthmethod, BLDummy())
typeof(blm) <: BLDummy && error("No matching entry found in dictionary for blocklengthmethod input $(blocklengthmethod). Please use collect(keys(DependentBootstrap.BLOCKLENGTH_METHOD_DICT)) at the REPL to see a list of valid keyword arguments.")
return blm
end
bootinput_get_blocklengthmethod(x, bootmethod) = typeof(x) <: BlockLengthMethod ? x : error("Invalid type for blocklengthmethod input. Use Symbol, String, or a subtype of BlockLengthMethod")
(bootinput_get_flevel1(f::Tf)::Tf) where {Tf<:Function} = f
bootinput_get_flevel1(f) = error("Invalid type for flevel1 input. Use a subtype of Function")
(bootinput_get_flevel2(f::Tf)::Tf) where {Tf<:Function} = f
bootinput_get_flevel2(f) = error("Invalid type for flevel2 input. Use a subtype of Function")
bootinput_get_numobsperresample(x::Number)::Int = Int(x)
bootinput_get_numobsperresample(x) = error("Invalid type for numobsperresample input. Use a subtype of Number, eg Int or Float64.")
(bootinput_get_fblocklengthcombine(f::Tf)::Tf) where {Tf<:Function} = f
bootinput_get_fblocklengthcombine(f) = error("Invalid type for fblocklengthcombine input. Use a subtype of Function")
function Base.show(io::IO, x::BootInput)
println(io, "Dependent bootstrap input:")
println(io, " Number of observations in dataset = $(x.numobs)")
println(io, " Current block length = $(x.blocklength)")
println(io, " Number of resamples = $(x.numresample)")
println(io, " Bootstrap method = $(x.bootmethod)")
println(io, " Block length method = $(x.blocklengthmethod)")
println(io, " Level 1 statistic = $(x.flevel1)")
println(io, " Level 2 statistic = $(x.flevel2)")
println(io, " Number of observations per resample = $(x.numobsperresample)")
println(io, " Block length combine function = $(x.fblocklengthcombine)")
end
using Random, Test
using StatsBase
using Distributions
using DependentBootstrap
using DataFrames, TimeSeries
#Quick and dirty function for simulating AR(1) data with deterministic start point for random number generation
function temp_ar(seedint::Int)
    Random.seed!(seedint) #Random.seed! replaces the deprecated srand
N = 100
e = randn(N)
x = NaN*ones(Float64,N)
x[1] = 0.0
for n = 2:N
x[n] = 0.8 * x[n-1] + e[n]
end
return x
end
#Get the AR(1) data
x = temp_ar(1234);
xmat = hcat(x, temp_ar(5678));
xvv = [ xmat[:, k] for k = 1:size(xmat,2) ]
#bootstrap methods
bootmethodvec = [:iid, :stationary, :moving, :circular, :nooverlap];
bootmethodtypevec = [DependentBootstrap.BootIID(), DependentBootstrap.BootStationary(), DependentBootstrap.BootMoving(),
DependentBootstrap.BootCircular(), DependentBootstrap.BootNoOverlap()]
blocklengthvec = [0.0, 5.0];
blocklengthmethodvec = [:ppw2009]
blocklengthmethodtypevec = [DependentBootstrap.BLPPW2009()]
#Test constructor
@testset "BootInput constructor test" begin
for kbm = 1:length(bootmethodvec)
for kbl = 1:length(blocklengthvec)
for kblm = 1:length(blocklengthmethodvec)
bi = BootInput(x, 100, blocklengthvec[kbl], 200, bootmethodtypevec[kbm],
blocklengthmethodtypevec[kblm], var, std, 300, mean)
@test bi.numresample == 200
@test bi.bootmethod == bootmethodtypevec[kbm]
@test bi.blocklengthmethod == blocklengthmethodtypevec[kblm]
@test bi.flevel1 == var
@test bi.flevel2 == std
@test bi.numobsperresample == 300
@test bi.fblocklengthcombine == mean
bi = BootInput(x, numresample=200, bootmethod=bootmethodvec[kbm], blocklength=blocklengthvec[kbl],
blocklengthmethod=blocklengthmethodvec[kblm], flevel1=var, flevel2=std,
numobsperresample=300, fblocklengthcombine=mean)
@test bi.numresample == 200
@test bi.bootmethod == bootmethodtypevec[kbm]
@test bi.blocklengthmethod == blocklengthmethodtypevec[kblm]
@test bi.flevel1 == var
@test bi.flevel2 == std
@test bi.numobsperresample == 300
@test bi.fblocklengthcombine == mean
end
end
end
end
bootmethodvec = Symbol[:iid, :stationary, :moving]
correctblocklength1 = Vector{Float64}[Float64[1.0, 7.002404488495543, 8.015752150100226]];
correctblocklength2 = Vector{Float64}[Float64[1.0, 6.837001835299588, 7.826413377230708]];
#Test block length selection procedure
@testset "Block length selection test" begin
for kbm = 1:length(bootmethodvec)
for kblm = 1:length(blocklengthmethodvec)
bi = BootInput(xmat[:, 1], numresample=200, bootmethod=bootmethodvec[kbm], blocklengthmethod=blocklengthmethodvec[kblm])
@test isapprox(bi.blocklength, correctblocklength1[kblm][kbm])
bi = BootInput(xmat[:, 2], numresample=200, bootmethod=bootmethodvec[kbm], blocklengthmethod=blocklengthmethodvec[kblm])
@test isapprox(bi.blocklength, correctblocklength2[kblm][kbm])
bi = BootInput(xmat, numresample=200, bootmethod=bootmethodvec[kbm], blocklengthmethod=blocklengthmethodvec[kblm], fblocklengthcombine=mean)
@test isapprox(bi.blocklength, mean([correctblocklength1[kblm][kbm],correctblocklength2[kblm][kbm]]))
end
end
end
#Test univariate bootstrap method
bootmethodvec = [:iid, :stationary, :moving, :circular, :nooverlap];
correctbootunivariate = Float64[0.02231785457420185,0.0805280736777265,0.07755558515710691,0.07248357346968781,0.0718412998084052]
correctbootunivariatebl1 = 0.021374650187840242*ones(Float64, length(bootmethodvec))
@testset "Univariate bootstrap test" begin
for kbm = 1:length(bootmethodvec)
Random.seed!(1234)
y = dboot(x, numresample=1000, bootmethod=bootmethodvec[kbm], blocklength=5, flevel1=mean, flevel2=var)
@test isapprox(y, correctbootunivariate[kbm])
end
for kbm = 1:length(bootmethodvec)
Random.seed!(1234)
y = dboot(x, numresample=500, bootmethod=bootmethodvec[kbm], blocklength=1, flevel1=mean, flevel2=var)
@test isapprox(y, correctbootunivariatebl1[kbm])
end
end
#Test multivariate bootstrap
bootmethodvec = [:iid, :stationary, :moving, :circular, :nooverlap];
correctbootmultmatrix = Float64[0.19194008990823777,0.40101052326568176,0.5186276897440075,0.44038974430819566,0.3960765922525029]
correctbootmultvv = Float64[0.1461957546031049,0.3546585480604615,0.316914521186327,0.34363235435327766,0.361937416083102]
@testset "Multivariate bootstrap test" begin
for kbm = 1:length(bootmethodvec)
Random.seed!(1234)
y = dbootvar(xmat, numresample=1000, bootmethod=bootmethodvec[kbm], blocklength=5, flevel1=x->minimum(x[:,1].*x[:,2]))
@test isapprox(y, correctbootmultmatrix[kbm])
Random.seed!(1234)
        y = dbootconf(xvv, numresample=1000, bootmethod=bootmethodvec[kbm], blocklength=5, flevel1=x->mean([mean(x[1]),mean(x[2])]))
@test isapprox(y[2], correctbootmultvv[kbm])
end
end
#Test dbootdata and dbootinds
@testset "Test dbootdata and dbootinds" begin
a = dbootinds(x, numresample=20, blocklength=5, bootmethod=:stationary)
@test length(a) == 20
a = dbootdata(x, numresample=20, blocklength=5, bootmethod=:stationary)
@test length(a) == 20
end
#Test exotic dataset types
@testset "Exotic data types test" begin
Random.seed!(1234)
xdf = DataFrame(xmat)
#y = dboot(xdf, bootmethod=:stationary, blocklength=5, numresample=1000, flevel1=x->mean(DataFrames.columns(x)[1]))
y = dboot(xdf, bootmethod=:stationary, blocklength=5, numresample=1000, flevel1=(x->mean(x[:,1])))
@test isapprox(y, 0.0805280736777265)
dtvec = [ Date(2000)+Day(n) for n = 1:size(xmat,1) ]
xta1 = TimeSeries.TimeArray(dtvec, x)
xta2 = TimeSeries.TimeArray(dtvec, xmat)
Random.seed!(1234)
y1 = dboot(xta1, bootmethod=:stationary, blocklength=5, numresample=1000, flevel1=x->mean(values(x)))
Random.seed!(1234)
y2 = dboot(xta2, bootmethod=:stationary, blocklength=5, numresample=1000, flevel1=x->mean(values(x)))
@test isapprox(y1, 0.0805280736777265)
@test isapprox(y2, 0.05077003035163576)
end
DependentBootstrap.jl
=====================
## Note: Package tests may fail on Julia v1.6 or later due to an update in random number generation. I need to update the tests in this package so they don't depend on the specifics of the underlying random number generator in Julia, which I will do in due course. For now, don't worry if the tests fail, nothing of note has been changed in this package in years.
[](https://travis-ci.org/colintbowers/DependentBootstrap.jl)
A module for the Julia language that implements several varieties of the dependent statistical bootstrap as well as the corresponding block-length selection procedures.
## News
This package is compatible with Julia v1.0+. If you are running v0.6, you will need to use `Pkg.pin("DependentBootstrap", v"0.1.1")` at the REPL, and if you are running v0.5, use `Pkg.pin("DependentBootstrap", v"0.0.1")`. Compatibility with versions before v0.5 is not available.
## Main features
This module allows Julia users to estimate the distribution of a test statistic using any of several varieties of the dependent bootstrap.
The following bootstrap methods are implemented:
* the *iid* bootstrap proposed in Efron (1979) "Bootstrap Methods: Another Look at the Jackknife",
* the stationary bootstrap proposed in Politis, Romano (1994) "The Stationary Bootstrap"
* the moving block bootstrap proposed in Kunsch (1989) "The jackknife and the bootstrap for general stationary observations" and (independently) Liu, Singh (1992) "Moving blocks jackknife and bootstrap capture weak dependence",
* the circular block bootstrap proposed in Politis, Romano (1992) "A circular block resampling procedure for stationary data", and
* the non-overlapping block bootstrap described in Lahiri (1999) *Resampling Methods for Dependent Data* (this method is not usually used and is included mainly as a curiosity).
The module also implements the following block length selection procedures:
* the block length selection procedure proposed in Politis, White (2004) "Automatic Block Length Selection For The Dependent Bootstrap", including the correction provided in Patton, Politis, and White (2009)
Bandwidth selection for the block length procedures is implemented using the method proposed in Politis (2003) "Adaptive Bandwidth Choice".
Some work has been done to implement the tapered block bootstrap of Paparoditis, Politis (2002) "The tapered block bootstrap for general statistics from stationary sequences", along with corresponding block-length selection procedures, but it is not yet complete.
The module is implemented entirely in Julia.
## What this package does not include
I have not included any procedures for bootstrapping confidence intervals in a linear regression framework, or other parametric models. This functionality is provided by [Bootstrap.jl](https://github.com/juliangehring/Bootstrap.jl), and work is currently under way to add bootstrap methods from this package to the `Bootstrap` API.
I also have not included support for the jackknife, wild bootstrap, or subsampling procedures. I would be quite open to pull requests that add these methods to the present package, but have not had time to implement them myself. Work is ongoing to include the tapered block bootstrap, and ideally, the package will also eventually include the extended tapered block bootstrap. If you are interested in working on any of these projects, please feel free to contact me.
## How to use this package
#### Installation
This package should be added using `using Pkg ; Pkg.add("DependentBootstrap")`, and can then be loaded with `using DependentBootstrap`. The package depends on `StatsBase` and `Distributions` for some functionality, and on `DataFrames` and `TimeSeries` so that `DataFrame` and `TimeArray` datasets can be supported by this package's methods.
#### Terminology
In what follows, I use the terminology from Lahiri (1999) *Resampling Methods for Dependent Data* and refer to the underlying test statistic of interest as a *level 1 statistic*, and the distribution parameter of the test statistic that is of interest as a *level 2 parameter*. For example, the user might have some dataset `x` of type `T_data`, and be interested in the variance of the sample mean of `x`. In this case, the level 1 statistic is the sample mean function `mean`, and the level 2 parameter is the sample variance function `var`.
I use `T_data` to refer to the type of the user's dataset, `T_level1` to refer to the output type obtained by applying the level 1 statistic function to the dataset, and `T_level2` to refer to the output type obtained by applying the level 2 statistic to a `Vector{T_level1}` (i.e. a vector of resampled level 1 statistics).
#### Exported functions
The package exports the following functions, all of which have docstrings that can be called interactively at the REPL:
* `dbootinds(...)::Vector{Vector{Int}}` -> Returns indices that can be used to index into the underlying data to obtain bootstrapped data. Note, each inner vector of the output corresponds to a single re-sample for the underlying data.
* `dbootdata(...)::Vector{T_data}` -> Returns the bootstrapped data. Each element of the output vector corresponds to one re-sampled dataset, and the output vector will have length equal to `numresample` (a parameter discussed later).
* `dbootlevel1(...)::Vector{T_level1}` -> Returns a vector of bootstrapped level 1 statistics, where the output vector will have length equal to `numresample`.
* `dbootlevel2(...)::T_level2` -> Returns the bootstrapped distribution parameter of the level 1 statistic.
* `dboot(...)::T_level2` -> Identical to dbootlevel2. Most users will want to use this function.
* `dbootvar(...)::Float64` -> Identical to `dboot` but automatically sets `flevel2` to `var` (the sample variance function)
* `dbootconf(...)::Vector{Float64}` -> Identical to `dboot` but automatically sets `flevel2` to the anonymous function `x -> quantile(x, [0.025, 0.975])`, so the level 2 distribution parameter is a 95% confidence interval. In addition to the usual keywords, the keyword version of this function also accepts the keyword `alpha::Float64=0.05`, which controls the width of the confidence interval. Note, `0.05` corresponds to a 95% confidence interval, `0.1` to a 90% interval, and `0.01` to a 99% interval (and so on).
* `optblocklength(...)::Float64` -> Returns the optimal block length.
The function `bandwidth_politis_2003{T<:Number}(x::AbstractVector{T})::Tuple{Int, Float64, Vector{Float64}}` is not exported, but its docstring can be accessed using `?DependentBootstrap.bandwidth_politis_2003` at the REPL. This function implements the bandwidth selection procedure from Politis (2003) discussed above, and may be of independent interest to some users.
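For example, it can be called directly on a numeric vector (a hedged sketch; beyond the documented return types, the meaning of each tuple element is not asserted here):

```julia
using DependentBootstrap
x = randn(200) # any univariate numeric series
out = DependentBootstrap.bandwidth_politis_2003(x) # ::Tuple{Int, Float64, Vector{Float64}}
```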
All of the above functions exhibit the following two core methods:
* `f(data ; kwargs...)`
* `f(data, bi::BootInput)`
where `data` is the user's underlying dataset, `kwargs` is a collection of keyword arguments, and `bi::BootInput` is a core type exported by the module that will be discussed later (but can be safely ignored by most users). The following types for `data` are currently accepted (see the sketch after this list):
* `Vector{<:Number}`,
* `Matrix{<:Number}` where rows are observations and columns are variables,
* `Vector{Vector{<:Number}}` where each inner vector is a variable,
* `DataFrame`
* `TimeArray`
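For instance, the following hedged sketch applies the same call to a univariate and a multivariate dataset (simulated data; defaults `flevel1=mean` and `flevel2=var` apply):

```julia
using DependentBootstrap
v = randn(300)     # univariate series
m = randn(300, 2)  # rows are observations, columns are variables
dboot(v)           # bootstrapped variance of the sample mean of v
dboot(m)           # same call on a Matrix; flevel1 receives each resampled Matrix
```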
Of the two core methods, most users will want the `kwargs` method. A list of valid keyword arguments and their default values follows:
* `blocklength` <- Block length for bootstrapping procedure. The default value is `0`. Set to <= 0 to auto-estimate the optimal block length from the dataset. `Float64` inputs are allowed.
* `numresample` <- Number of times to resample the input dataset. The default value is the module constant `NUM_RESAMPLE`, currently set to `1000`.
* `bootmethod` <- Bootstrapping methodology to use. The default value is `:stationary` (for the stationary bootstrap).
* `blocklengthmethod` <- Block length selection procedure to use if user wishes to auto-estimate the block length. Default value is `:ppw2009` (use the method described in Patton, Politis, and White (2009)).
* `flevel1` <- A function that converts the input dataset to the estimator that the user wishes to bootstrap. The default value is `mean`.
* `flevel2` <- A function that converts a vector of estimators constructed by `flevel1` into a distributional parameter. The default value is `var`.
* `numobsperresample` <- Number of observations to be drawn (with replacement) per resample. The default value is the number of observations in the dataset (the vast majority of users will want this default value).
* `fblocklengthcombine` <- A function for converting a `Vector{Float64}` of estimated blocklengths to a single `Float64` blocklength estimate, which is necessary when the input dataset is a multivariate type. The default value is `median`.
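A hedged sketch combining several of these keywords (values are purely illustrative):

```julia
using DependentBootstrap, Statistics
data = randn(500)
# 90% confidence interval for the sample median via a stationary bootstrap;
# the block length is auto-estimated because blocklength <= 0
ci = dbootconf(data, blocklength=0, numresample=2000,
               bootmethod=:stationary, flevel1=median, alpha=0.1)
```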
A list of acceptable keyword arguments for `bootmethod` and `blocklengthmethod` follows. Note you can use either `String` or `Symbol` when specifying these arguments. For `bootmethod` we have:
* `:iid` or `:efron` <- IID bootstrap
* `:stationary` <- Stationary bootstrap
* `:movingblock` or `:moving` <- Moving block bootstrap
* `:nonoverlappingblock` or `:nooverlap` <- Nonoverlapping block bootstrap
* `:circularblock` or `:circular` <- Circular block bootstrap
For `blocklengthmethod` we have:
* `:ppw2009` <- Block length selection method of Patton, Politis, and White (2009)
Acceptable arguments can also be examined interactively by examining the keys of the module dictionaries `BOOT_METHOD_DICT` and `BLOCKLENGTH_METHOD_DICT`.
In practice, the keyword argument method `f(data ; kwargs...)` actually just wraps a call to `f(data, BootInput(kwargs...))` under the hood. However, most users will not need to concern themselves with this level of detail.
For those who wish more fine-grained control, please use `?BootInput` at the REPL to get more information on this core module type.
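For example (a hedged sketch), a `BootInput` can be constructed once and then passed to several exported functions so that they all share the same configuration:

```julia
using DependentBootstrap
data = randn(500)
bi = BootInput(data, bootmethod=:moving, blocklength=5, numresample=1000)
inds = dbootinds(data, bi) # resampling indices
est  = dboot(data, bi)     # level 2 estimate under the same configuration
```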
#### Examples
Let `data::Vector{Float64}`.
The variance of the sample mean of `data` can be bootstrapped using a stationary bootstrap with optimally estimated block length using `dboot(data)` or `dbootvar(data)`.
A 90% confidence interval for the sample median using a circular block bootstrap with block length of 5 can be estimated using `dboot(data, blocklength=5, bootmethod=:circular, flevel1=median, flevel2=(x -> quantile(x, [0.05, 0.95])))` or `dbootconf(data, blocklength=5, bootmethod=:circular, flevel1=median, alpha=0.1)`.
Moving block bootstrap indices for generating bootstrapped data with optimally estimated block length can be obtained using `dbootinds(data, bootmethod=:moving)`, or if the user wants the bootstrapped data not the indices, `dbootdata(data, bootmethod=:moving)`. If the user wants bootstrapped sample medians of `data`, then use `dbootlevel1(data, bootmethod=:moving, flevel1=median)`.
If the user wants the optimal block length using the method proposed in Patton, Politis, and White (2009), use `optblocklength(data, blocklengthmethod=:ppw2009)`.
Now let `data::Matrix{Float64}`.
If the user wants the median optimal block length from each column of `data`, use `optblocklength(data, blocklengthmethod=:ppw2009)`. If the user wants the average optimal block length use `optblocklength(data, blocklengthmethod=:ppw2009, fblocklengthcombine=mean)`.
If the user wants the median of the test statistic that is the maximum of the sample mean of each column, using a stationary bootstrap with optimal block length, then use `dboot(data, flevel1=(x -> maximum(mean(x, dims=1))), flevel2=median)`. If `data::Vector{Vector{Float64}}` instead, and the user wanted the 95% confidence interval, use `dbootconf(data, flevel1=(x -> maximum([ mean(x[k]) for k = 1:length(x) ])))`.
using Documenter, EasyML
makedocs(modules=[EasyML],
sitename = "EasyML.jl",
pages = ["Home" => "index.md",
"Quick guide" => "quick_guide.md",
"GUI guide" => "gui_guide.md",
"Functions" => "functions.md",
"Advanced" => "advanced.md",
"Handling issues" => "handling_issues.md"],
authors = "Open Machine Learning Association",
format = Documenter.HTML(prettyurls = false)
)
deploydocs(
repo = "github.com/OML-NPA/EasyML.jl.git",
devbranch = "main"
)
module EasyML
# Import packages
using
# Interfacing
QML, Qt5QuickControls2_jll, Qt5Charts_jll, CxxWrap, CUDA,
# Data structuring
Parameters, DataFrames, Dates,
# Data import/export
FileIO, ImageIO, JSON, BSON, XLSX, CSVFiles,
# Data manipulation
Unicode,
# Image manipulation
Images, ColorTypes, ImageFiltering, ImageTransformations,
ImageMorphology, DSP, ImageMorphology.FeatureTransform, ImageSegmentation,
# Machine learning
Flux, Flux.Losses, FluxExtra,
# Math functions
Random, StatsBase, LinearAlgebra, Combinatorics,
# Other
ProgressMeter, FLoops
import CUDA.CuArray, Flux.outdims, StatsBase.std
CUDA.allowscalar(false)
# Include functions
include("modules/common/Common.jl")
include("modules/classes/Classes.jl")
include("modules/design/Design.jl")
include("modules/datapreparation/DataPreparation.jl")
include("modules/training/Training.jl")
include("modules/validation/Validation.jl")
include("modules/application/Application.jl")
using .Common, .Classes, .Design, .DataPreparation, .Training, .Validation, .Application
import .Training: TrainingOptions, TrainingData, TestingData, training_data, testing_data
import .DataPreparation.preparation_data, .Validation.validation_data, .Application.application_data
include("exported_functions.jl")
export QML, CUDA, Flux, FluxExtra, Normalizations, NNlib, ColorTypes
export Join, Split, Addition, Activation, Flatten, Identity
export ImageClassificationClass, ImageRegressionClass, BorderClass, ImageSegmentationClass
export model_data, global_options, data_preparation_options, training_options, validation_options, application_options,
preparation_data, training_data, validation_data, application_data
export set_savepath, save_options, load_options, change, save_model, load_model
export change_classes, design_model, prepare_training_data, get_urls_training, get_urls_testing, prepare_testing_data, train,
remove_training_data, remove_testing_data, remove_training_results, validation_results_data, get_urls_validation,
validate, remove_validation_data, remove_validation_results, get_urls_application, change_output_options, apply, remove_application_data
function __init__()
load_options()
# Needed to avoid an endless loop for Julia canvas
ENV["QSG_RENDER_LOOP"] = "basic"
end
end
function assign_urls(some_data::Union{TrainingData,TestingData},urls)
if isnothing(urls)
return nothing
else
if problem_type()==:classification
some_data.ClassificationData.Urls = urls
elseif problem_type()==:regression
some_data.RegressionData.Urls = urls
        else # problem_type()==:segmentation
some_data.SegmentationData.Urls = urls
end
end
end
function EasyML.DataPreparation.get_urls(url_inputs::String,some_data::Union{TrainingData,TestingData})
urls = get_urls(url_inputs)
assign_urls(some_data,urls)
return nothing
end
function EasyML.DataPreparation.get_urls(url_inputs::String,url_labels::String,some_data::Union{TrainingData,TestingData})
urls = get_urls(url_inputs,url_labels)
assign_urls(some_data,urls)
return nothing
end
function EasyML.DataPreparation.get_urls(some_data::Union{TrainingData,TestingData})
urls = get_urls()
assign_urls(some_data,urls)
return nothing
end
function get_train_test_inds(num::Int64,fraction::Float64)
inds = randperm(num) # Get shuffled indices
ind_last_test = convert(Int64,round(fraction*num))
inds_train = inds[ind_last_test+1:end]
inds_test = inds[1:ind_last_test]
if isempty(inds_test)
@error string("Fraction of ",fraction," from ",num,
" files is 0. Increase the fraction of data used for testing to at least ",round(1/num,digits=2),".")
return nothing,nothing
end
return inds_train,inds_test
end
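# Hedged illustration: get_train_test_inds(10,0.2) returns 8 shuffled training
# indices and 2 testing indices.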
function get_urls_testing_main(training_data::TrainingData,testing_data::TestingData,training_options::TrainingOptions)
if training_options.Testing.data_preparation_mode==:manual
urls = get_urls()
if problem_type()==:classification
testing_data.ClassificationData.Urls = urls
elseif problem_type()==:regression
testing_data.RegressionData.Urls = urls
elseif problem_type()==:segmentation
testing_data.SegmentationData.Urls = urls
end
else
if problem_type()==:classification
typed_training_data = training_data.ClassificationData
typed_testing_data = testing_data.ClassificationData
training_inputs = typed_training_data.Urls.input_urls
testing_inputs = typed_testing_data.Urls.input_urls
training_labels = typed_training_data.Urls.label_urls
testing_labels = typed_testing_data.Urls.label_urls
elseif problem_type()==:regression
typed_training_data = training_data.RegressionData
typed_testing_data = testing_data.RegressionData
training_inputs = typed_training_data.Urls.input_urls
testing_inputs = typed_testing_data.Urls.input_urls
training_labels = typed_training_data.Urls.initial_data_labels
testing_labels = typed_testing_data.Urls.initial_data_labels
elseif problem_type()==:segmentation
typed_training_data = training_data.SegmentationData
typed_testing_data = testing_data.SegmentationData
training_inputs = typed_training_data.Urls.input_urls
testing_inputs = typed_testing_data.Urls.input_urls
training_labels = typed_training_data.Urls.label_urls
testing_labels = typed_testing_data.Urls.label_urls
end
if isempty(training_inputs) || isempty(training_labels)
@error "Training data urls should be loaded first. Run 'get_urls_training'."
return nothing
end
training_inputs_copy = copy(training_inputs)
training_labels_copy = copy(training_labels)
empty!(training_inputs)
empty!(testing_inputs)
empty!(training_labels)
empty!(testing_labels)
fraction = training_options.Testing.test_data_fraction
if problem_type()==:classification
nums = length.(training_inputs_copy)
for i = 1:length(nums)
num = nums[i]
inds_train,inds_test = get_train_test_inds(num,fraction)
if isnothing(inds_train)
return nothing
end
push!(training_inputs,training_inputs_copy[i][inds_train])
push!(testing_inputs,training_inputs_copy[i][inds_test])
end
append!(training_labels,training_labels_copy)
append!(testing_labels,training_labels_copy)
elseif problem_type()==:regression || problem_type()==:segmentation
num = length(training_inputs_copy)
inds_train,inds_test = get_train_test_inds(num,fraction)
if isnothing(inds_train)
return nothing
end
append!(training_inputs,training_inputs_copy[inds_train])
append!(testing_inputs,training_inputs_copy[inds_test])
append!(training_labels,training_labels_copy[inds_train])
append!(testing_labels,training_labels_copy[inds_test])
end
end
return nothing
end
"""
get_urls_training(url_inputs::String,url_labels::String)
Gets URLs to all files present in both folders (or a folder and a file)
specified by `url_inputs` and `url_labels` for training. URLs are automatically saved to `EasyML.training_data`.
"""
get_urls_training(url_inputs,url_labels) = get_urls(url_inputs,url_labels,training_data)
"""
get_urls_testing(url_inputs::String,url_labels::String)
Gets URLs to all files present in both folders (or a folder and a file)
specified by `url_inputs` and `url_labels` for testing. URLs are automatically saved to `EasyML.testing_data`.
"""
get_urls_testing(url_inputs,url_labels) = get_urls(url_inputs,url_labels,testing_data)
"""
get_urls_training(url_inputs::String)
Used for classification. Gets URLs to all files present in folders located at a folder specified by `url_inputs`
for training. Folders should have names identical to the name of classes. URLs are automatically saved to `EasyML.training_data`.
"""
get_urls_training(url_inputs) = get_urls(url_inputs,training_data)
"""
get_urls_testing(url_inputs::String)
Used for classification. Gets URLs to all files present in folders located at a folder specified by `url_inputs`
for testing. Folders should have names identical to the name of classes. URLs are automatically saved to `EasyML.testing_data`.
"""
get_urls_testing(url_inputs) = get_urls(url_inputs,testing_data)
"""
get_urls_training()
Opens a folder/file dialog or dialogs to choose folders or folder and a file containing inputs
and labels. URLs are automatically saved to `EasyML.training_data`.
"""
get_urls_training() = get_urls(training_data)
"""
get_urls_testing()
If testing data preparation in `modify(training_options)` is set to auto, then a percentage
of training data also specified there is reserved for testing. If testing data
preparation is set to manual, then it opens a folder/file dialog or dialogs to choose folders or a folder and a file containing inputs
and labels. URLs are automatically saved to `EasyML.testing_data`.
"""
get_urls_testing() = get_urls_testing_main(training_data,testing_data,training_options)
function EasyML.DataPreparation.prepare_data(some_data::Union{TrainingData,TestingData})
if some_data isa TrainingData
println("Training data preparation:")
channel_name = "Training data preparation"
error_message = "No input urls. Run 'get_urls_training'."
else
println("Testing data preparation:")
channel_name = "Testing data preparation"
error_message = "No input urls. Run 'get_urls_testing'."
end
if any(model_data.input_size.<1)
@error "All dimension sizes of 'model_data.input_size' should be a positive number."
return nothing
end
if input_type()==:image
if problem_type()==:classification
if isempty(some_data.ClassificationData.Urls.input_urls)
@error error_message
return nothing
end
elseif problem_type()==:regression
if isempty(some_data.RegressionData.Urls.input_urls)
@error error_message
return nothing
end
elseif problem_type()==:segmentation
if isempty(some_data.SegmentationData.Urls.input_urls)
@error error_message
return nothing
end
end
end
results = prepare_data()
if problem_type()==:classification
some_data.ClassificationData.Data = results
elseif problem_type()==:regression
some_data.RegressionData.Data = results
else # problem_type()==:segmentation
some_data.SegmentationData.Data = results
end
return nothing
end
"""
prepare_training_data()
Prepares images and corresponding labels for training using URLs loaded previously using
`get_urls_training`. Saves data to `EasyML.training_data`.
"""
prepare_training_data() = prepare_data(training_data)
"""
prepare_testing_data()
Prepares images and corresponding labels for testing using URLs loaded previously using
`get_urls_testing`. Saves data to `EasyML.testing_data`.
"""
prepare_testing_data() = prepare_data(testing_data)
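# Hedged end-to-end sketch (paths are hypothetical):
# get_urls_training("data/inputs","data/labels")
# get_urls_testing() # splits training data automatically when set to auto
# prepare_training_data()
# prepare_testing_data()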
module Application
# Import packages
using
# Interfacing
CUDA,
# Data import/export
FileIO, ImageIO, CSVFiles, XLSX, JSON, BSON,
# Data manipulation
Unicode, DataFrames,
# Image manipulation
Images, ColorTypes, ImageFiltering, ImageMorphology.FeatureTransform, ImageSegmentation,
# Machine learning
Flux, Flux.Losses, FluxExtra,
# Math functions
Random, StatsBase, LinearAlgebra,
# Other
FLoops, ProgressMeter,
# EasyML ecosystem
..Common, ..Common.Classes, ..Common.Application
import CUDA.CuArray, StatsBase.std
import ..Classes
import ..Classes: change_classes, num_classes, get_class_field, get_class_data,
get_problem_type, get_input_type
# Include functions
include(string(common_dir(),"/common/validation_application.jl"))
include(string(common_dir(),"/common/preparation_validation_application.jl"))
include("output_methods.jl")
include("main.jl")
include("exported_functions.jl")
export application_options, ApplicationOptions
export change_output_options, get_urls_application, apply, remove_application_data, forward, apply_border_data
end
"""
change(application_options::EasyML.ApplicationOptions)
Allows to change `application_options` in a GUI.
"""
function Common.change(application_options::ApplicationOptions)
@qmlfunction(
get_options,
set_options,
save_options,
pwd,
fix_slashes,
unit_test
)
path_qml = string(@__DIR__,"/gui/ApplicationOptions.qml")
gui_dir = string("file:///",replace(@__DIR__, "\\" => "/"),"/gui/")
text = add_templates(path_qml)
loadqml(QByteArray(text),
gui_dir = gui_dir)
exec()
return nothing
end
"""
    change_output_options()
Opens a GUI for addition or modification of output options for classes.
"""
function change_output_options()
local output_type, num_c
if isempty(model_data.classes)
@error "There are no classes. Add classes using 'change_classes()'."
return nothing
end
if problem_type()==:classification
@info "Classification has no output options to change."
return nothing
elseif problem_type()==:regression
@info "Regression has no output options to change."
return nothing
elseif problem_type()==:segmentation
output_type = ImageSegmentationOutputOptions
num_c = sum(map(x -> (!).(x.overlap),model_data.classes))
end
if eltype(model_data.output_options)!=output_type ||
length(model_data.output_options)!=num_c
model_data.output_options = output_type[]
for _=1:num_c
push!(model_data.output_options,output_type())
end
end
@qmlfunction(
save_model,
get_class_field,
get_data,
get_options,
get_output,
set_output,
get_problem_type,
num_classes,
unit_test
)
path_qml = string(@__DIR__,"/gui/OutputDialog.qml")
gui_dir = string("file:///",replace(@__DIR__, "\\" => "/"),"/gui/")
text = add_templates(path_qml)
loadqml(QByteArray(text),
gui_dir = gui_dir)
exec()
return nothing
end
"""
get_urls_application(url_inputs::String)
Gets URLs to all files present in a folder specified by `url_inputs`
for application. URLs are automatically saved to `EasyML.application_data`.
"""
function get_urls_application(url_inputs::String)
if !isdir(url_inputs)
@error string(url_inputs," does not exist.")
return nothing
end
application_data.url_inputs = url_inputs
get_urls_application_main(application_data)
return nothing
end
"""
get_urls_application()
Opens a folder dialog to choose a folder containing files to which a model should be applied.
URLs are automatically saved to `EasyML.application_data`.
"""
function get_urls_application()
    dir = pwd()
@info "Select a directory with input data."
application_data.url_inputs = get_folder(dir)
if application_data.url_inputs==""
@error "Input data directory URL is empty."
return nothing
else
@info string(application_data.url_inputs, " was selected.")
end
get_urls_application_main(application_data)
return nothing
end
"""
apply()
Starts application of a model.
"""
function apply()
println("Application:")
if isempty(application_data.input_urls)
@error "No input urls. Run 'get_urls_application'."
return nothing
end
empty_channel(:application_progress)
t = apply_main2(model_data,all_data,options,channels)
max_value = 0
value = 0
p = Progress(0)
while true
if max_value!=0
temp_value = get_progress(:application_progress)
if temp_value!=false
value += temp_value
                # Advance the progress bar by one processed item
                next!(p)
elseif value==max_value
break
else
state,error = check_task(t)
if state==:error
throw(error)
return nothing
end
sleep(0.1)
end
else
temp_value = get_progress(:application_progress)
if temp_value!=false
if temp_value!=0
max_value = temp_value
p.n = max_value
else
@error "No data to process."
break
end
else
state,error = check_task(t)
if state==:error
throw(error)
return nothing
end
sleep(0.1)
end
end
end
return nothing
end
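# Hedged usage sketch (directory name is hypothetical):
# get_urls_application("data/new_images")
# apply() # results are written to the savepath set in application_options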
function remove_application_data_main(application_data)
data = application_data
fields = fieldnames(ApplicationData)
for field in fields
data_field = getfield(data, field)
if data_field isa Array
empty!(data_field)
end
end
end
"""
remove_application_data()
Removes all application data.
"""
remove_application_data() = remove_application_data_main(application_data)
function fix_slashes(url)
url::String = fix_QML_types(url)
url = replace(url, "\\" => "/")
url = string(uppercase(url[1]),url[2:end])
end
# Works as fill!, but does not use a reference
function fill_no_ref!(target::AbstractArray,el)
for i = 1:length(target)
target[i] = copy(el)
end
end
# Allows to read class output options from GUI
function get_output_main(model_data::ModelData,fields,ind)
fields::Vector{String} = fix_QML_types(fields)
ind::Int64 = fix_QML_types(ind)
data = model_data.output_options[ind]
for i = 1:length(fields)
field = Symbol(fields[i])
data = getproperty(data,field)
end
if data isa Symbol
return string(data)
else
return data
end
end
get_output(fields,ind) = get_output_main(model_data,fields,ind)
# Allows to write to class output options from GUI
function set_output_main(model_data::ModelData,fields,ind,value)
fields::Vector{String} = fix_QML_types(fields)
ind::Int64 = fix_QML_types(ind)
value = fix_QML_types(value)
data = model_data.output_options[ind]
for i = 1:length(fields)-1
field = Symbol(fields[i])
data = getproperty(data,field)
end
if getproperty(data, Symbol(fields[end])) isa Symbol
setproperty!(data, Symbol(fields[end]), Symbol(value))
else
setproperty!(data, Symbol(fields[end]), value)
end
return nothing
end
set_output(fields,ind,value) = set_output_main(model_data,fields,ind,value)
# Get urls of files in a selected folder. Files are used for application.
function get_urls_application_main(application_data::ApplicationData)
if input_type()==:image
allowed_ext = ["png","jpg","jpeg"]
end
input_urls,dirs = get_urls1(application_data.url_inputs,allowed_ext)
application_data.input_urls = input_urls
application_data.folders = dirs
return nothing
end
function prepare_application_data(norm_func::Function,classes::Union{Vector{T1},Vector{T3}},
model_data::ModelData,urls::Vector{String}) where {T1<:ImageClassificationClass,
T3<:ImageSegmentationClass}
num = length(urls)
data = Vector{Array{Float32,4}}(undef,length(urls))
for i = 1:num
url = urls[i]
image = load_image(url)
if :grayscale in model_data.input_properties
data_raw = image_to_gray_float(image)
else
data_raw = image_to_color_float(image)
end
norm_func(data_raw)
data[i] = data_raw[:,:,:,:]
end
data_out = cat(data...,dims=Val(4))
return data_out
end
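# Note: images are stacked along the 4th (batch) dimension above, so the
# returned array is 4-D with size(data_out,4) == length(urls).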
function prepare_application_data(norm_func::Function,classes::Vector{ImageRegressionClass},
model_data::ModelData,urls::Vector{String})
num = length(urls)
data = Vector{Array{Float32,4}}(undef,length(urls))
for i = 1:num
url = urls[i]
image = load_image(url)
if size(image)!=model_data.input_size[1:2]
image = fix_image_size(model_data,image)
end
if :grayscale in model_data.input_properties
data_raw = image_to_gray_float(image)
else
data_raw = image_to_color_float(image)
end
norm_func(data_raw)
data[i] = data_raw[:,:,:,:]
end
data_out = cat(data...,dims=Val(4))
return data_out
end
function get_filenames(urls::Vector{Vector{String}})
num = length(urls)
data = Vector{Vector{String}}(undef,num)
for i = 1:num
data_temp = copy(urls[i])
data_temp = map((x) -> split(x,('\\','/')), data_temp)
data_temp = map(x->x[end],data_temp)
data_temp = split.(data_temp,'.')
data[i] = map(x->string(x[1:end-1]...),data_temp)
end
return data
end
# Batches filenames together allowing for correct naming during export
function batch_urls_filenames(urls::Vector{Vector{String}},batch_size::Int64)
num = length(urls)
filenames = get_filenames(urls)
filename_batches = Vector{Vector{Vector{String}}}(undef,num)
url_batches = Vector{Vector{Vector{String}}}(undef,num)
for i = 1:num
urls_temp = urls[i]
filenames_temp = filenames[i]
len = length(urls_temp)
url_batches_temp = Vector{Vector{String}}(undef,0)
filename_batches_temp = Vector{Vector{String}}(undef,0)
num = len - batch_size
val = max(0.0,floor(num/batch_size))
finish = Int64(val*batch_size)
inds = collect(0:batch_size:finish)
if isempty(inds)
inds = [0]
end
num = length(inds)
for j = 1:num
ind = inds[j]
if j==num
ind1 = ind+1
ind2 = len
else
ind1 = ind+1
ind2 = ind+batch_size
end
push!(url_batches_temp,urls_temp[ind1:ind2])
push!(filename_batches_temp,filenames_temp[ind1:ind2])
end
url_batches[i] = url_batches_temp
filename_batches[i] = filename_batches_temp
end
return url_batches,filename_batches
end
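# Hedged illustration of the batching above: with batch_size = 2, 4 urls are
# batched as [1:2, 3:4], while 3 urls yield a single batch [1:3], because the
# remainder is merged into the final batch.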
function get_output(norm_func::Function,classes::Vector{ImageClassificationClass},num::Int64,
urls_batched::Vector{Vector{Vector{String}}},model_data::ModelData,
num_slices_val::Int64,offset_val::Int64,use_GPU::Bool,
data_channel::Channel{Tuple{Int64,Vector{Int64}}},channels::Channels)
for k = 1:num
urls_batch = urls_batched[k]
num_batch = length(urls_batch)
for l = 1:num_batch
# Stop if asked
#=if check_abort_signal(channels.application_modifiers)
return nothing
end=#
# Get input
input_data = prepare_application_data(norm_func,classes,model_data,urls_batch[l])
# Get output
predicted = forward(model_data.model,input_data,
num_slices=num_slices_val,offset=offset_val,use_GPU=use_GPU)
_, predicted_labels4 = findmax(predicted,dims=1)
predicted_labels = map(x-> x.I[1],predicted_labels4[:])
# Return result
put!(data_channel,(l,predicted_labels))
end
end
return nothing
end
function get_output(norm_func::Function,classes::Vector{ImageRegressionClass},num::Int64,
urls_batched::Vector{Vector{Vector{String}}},model_data::ModelData,
num_slices_val::Int64,offset_val::Int64,use_GPU::Bool,
data_channel::Channel{Tuple{Int64,Vector{Float32}}},channels::Channels)
for k = 1:num
urls_batch = urls_batched[k]
num_batch = length(urls_batch)
for l = 1:num_batch
# Stop if asked
#=if check_abort_signal(channels.application_modifiers)
return nothing
end=#
# Get input
input_data = prepare_application_data(norm_func,classes,model_data,urls_batch[l])
# Get output
predicted = forward(model_data.model,input_data,
num_slices=num_slices_val,offset=offset_val,use_GPU=use_GPU)
predicted_labels = reshape(predicted,:)
# Return result
put!(data_channel,(l,predicted_labels))
end
end
return nothing
end
function get_output(norm_func::Function,classes::Vector{ImageSegmentationClass},num::Int64,
urls_batched::Vector{Vector{Vector{String}}},model_data::ModelData,
num_slices_val::Int64,offset_val::Int64,use_GPU::Bool,
data_channel::Channel{Tuple{Int64,BitArray{4}}},channels::Channels)
for k = 1:num
urls_batch = urls_batched[k]
num_batch = length(urls_batch)
for l = 1:num_batch
# Stop if asked
#=if check_abort_signal(channels.application_modifiers)
return
end=#
# Get input
input_data = prepare_application_data(norm_func,classes,model_data,urls_batch[l])
# Get output
predicted = forward(model_data.model,input_data,
num_slices=num_slices_val,offset=offset_val,use_GPU=use_GPU)
predicted_bool = predicted.>0.5
# Return result
put!(data_channel,(l,predicted_bool))
end
end
return nothing
end
function run_iteration(classes::Vector{ImageSegmentationClass},output_options::Vector{ImageSegmentationOutputOptions},
savepath::String,filenames_batch::Vector{Vector{String}},num_c::Int64,num_border::Int64,
labels_color::Vector{Vector{Float64}},labels_incl::Vector{Vector{Int64}},apply_border::Bool,border::Vector{Bool},
objs_area::Vector{Vector{Vector{Float64}}},objs_volume::Vector{Vector{Vector{Float64}}},img_ext_string::String,
img_ext::Symbol,scaling::Float64,apply_by_file::Bool,data_taken::Threads.Atomic{Bool},
data_channel::Channel{Tuple{Int64,BitArray{4}}},channels::Channels)
# Get neural network output
l,predicted_bool = take!(data_channel)
Threads.atomic_xchg!(data_taken, true)
size_dim4 = size(predicted_bool,4)
# Flatten and use border info if present
masks = Vector{BitArray{3}}(undef,size_dim4)
for j = 1:size_dim4
temp_mask = predicted_bool[:,:,:,j]
if apply_border
border_mask = apply_border_data(temp_mask,classes)
temp_mask = cat(temp_mask,border_mask,dims=Val(3))
end
for i=1:num_c
min_area = classes[i].min_area
if min_area>1
if border[i]
ind = i + num_c + num_border
else
ind = i
end
temp_array = temp_mask[:,:,ind]
# Fix areaopen not removing all objects less than min area
for _ = 1:2
areaopen!(temp_array,min_area)
end
temp_mask[:,:,ind] .= temp_array
end
end
masks[j] = temp_mask
end
# Stop if asked
#=if check_abort_signal(channels.application_modifiers)
return nothing
end=#
filenames = filenames_batch[l]
cnt = sum(length.(filenames_batch[1:l-1]))
for j = 1:length(masks)
if apply_by_file
cnt = cnt + 1
else
cnt = 1
end
filename = filenames[j]
mask = masks[j]
# Make and export images
mask_to_img(mask,classes,output_options,labels_color,border,savepath,filename,img_ext_string,img_ext)
# Make data out of masks
mask_to_data(objs_area,objs_volume,cnt,mask,output_options,labels_incl,border,num_c,num_border,scaling)
end
put!(channels.application_progress,1)
return nothing
end
function process_output(classes::Vector{ImageClassificationClass},output_options::Vector{ImageClassificationOutputOptions},
savepath_main::String,folders::Vector{String},filenames_batched::Vector{Vector{Vector{String}}},num::Int64,
img_ext_string::String,img_ext::Symbol,data_ext_string::String,data_ext::Symbol,
scaling::Float64,apply_by_file::Bool,data_channel::Channel{Tuple{Int64,Vector{Int64}}},channels::Channels)
class_names = map(x -> x.name,classes)
for k=1:num
folder = folders[k]
filenames_batch = filenames_batched[k]
num_batch = length(filenames_batch)
savepath = joinpath(savepath_main,folder)
if !isdir(savepath)
mkdir(savepath)
end
# Initialize accumulators
labels = Vector{String}(undef,0)
data_taken = Threads.Atomic{Bool}(true)
for _ = 1:num_batch
while true
if isready(data_channel) && data_taken[]==true
Threads.atomic_xchg!(data_taken, false)
break
else
# Stop if asked
#=if check_abort_signal(channels.application_modifiers)
return nothing
end=#
sleep(0.1)
end
end
# Get neural network output
_, label_inds = take!(data_channel)
Threads.atomic_xchg!(data_taken, true)
for j = 1:length(label_inds)
label_ind = label_inds[j]
push!(labels,class_names[label_ind])
end
put!(channels.application_progress,1)
end
# Export the result
filenames = reduce(vcat,filenames_batch)
df_filenames = DataFrame(Filenames=filenames)
df_labels = DataFrame(Labels = labels)
df = hcat(df_filenames,df_labels)
name = string(folder,data_ext_string)
save(df,savepath,name,data_ext)
put!(channels.application_progress,1)
end
return nothing
end
function process_output(classes::Vector{ImageRegressionClass},output_options::Vector{ImageRegressionOutputOptions},
savepath_main::String,folders::Vector{String},filenames_batched::Vector{Vector{Vector{String}}},num::Int64,
img_ext_string::String,img_ext::Symbol,data_ext_string::String,data_ext::Symbol,
scaling::Float64,apply_by_file::Bool,data_channel::Channel{Tuple{Int64,Vector{Float32}}},channels::Channels)
class_names = map(x -> x.name,classes)
for k=1:num
folder = folders[k]
filenames_batch = filenames_batched[k]
num_batch = length(filenames_batch)
savepath = joinpath(savepath_main,folder)
if !isdir(savepath)
mkdir(savepath)
end
# Initialize accumulators
labels_accum = Vector{Vector{Float32}}(undef,0)
data_taken = Threads.Atomic{Bool}(true)
for _ = 1:num_batch
while true
if isready(data_channel) && data_taken[]==true
Threads.atomic_xchg!(data_taken, false)
break
else
# Stop if asked
#=if check_abort_signal(channels.application_modifiers)
return nothing
end=#
sleep(0.1)
end
end
# Get neural network output
_, label = take!(data_channel)
Threads.atomic_xchg!(data_taken, true)
push!(labels_accum,label)
put!(channels.application_progress,1)
end
labels_temp = reduce(vcat,labels_accum)
if length(classes)==1
labels = convert(Array{Float64,2},reshape(labels_temp,:,1))
else
labels = convert(Array{Float64,2},labels_temp)
end
# Export the result
filenames = reduce(vcat,filenames_batch)
df_filenames = DataFrame(Filenames=filenames)
df_labels = DataFrame(labels,class_names)
df = hcat(df_filenames,df_labels)
name = string(folder,data_ext_string)
save(df,savepath,name,data_ext)
put!(channels.application_progress,1)
end
return nothing
end
function process_output(classes::Vector{ImageSegmentationClass},output_options::Vector{ImageSegmentationOutputOptions},
savepath_main::String,folders::Vector{String},filenames_batched::Vector{Vector{Vector{String}}},num::Int64,
num_border::Int64,labels_color::Vector{Vector{Float64}},labels_incl::Vector{Vector{Int64}},apply_border::Bool,
border::Vector{Bool},log_area_obj::Vector{Bool},log_area_obj_sum::Vector{Bool},log_area_dist::Vector{Bool},
log_volume_obj::Vector{Bool},log_volume_obj_sum::Vector{Bool},log_volume_dist::Vector{Bool},num_obj_area::Int64,
num_obj_area_sum::Int64,num_dist_area::Int64,num_obj_volume::Int64,num_obj_volume_sum::Int64,num_dist_volume::Int64,
img_ext_string::String,img_ext::Symbol,data_ext_string::String,data_ext::Symbol,
scaling::Float64,apply_by_file::Bool,data_channel::Channel{Tuple{Int64,BitArray{4}}},channels::Channels)
num_c = length(classes)
num_c_adj = num_c + sum(map(x -> sum((!).(isempty.(x.parents))),classes))
for k=1:num
folder = folders[k]
filenames_batch = filenames_batched[k]
num_batch = length(filenames_batch)
savepath = joinpath(savepath_main,folder)
if !isdir(savepath)
mkdir(savepath)
end
# Initialize accumulators
if apply_by_file
num_init = num_batch
else
num_init = 1
end
objs_area = Vector{Vector{Vector{Float64}}}(undef,num_init)
objs_volume = Vector{Vector{Vector{Float64}}}(undef,num_init)
objs_area_sum = Vector{Vector{Float64}}(undef,num_init)
objs_volume_sum = Vector{Vector{Float64}}(undef,num_init)
histograms_area = Vector{Vector{Histogram}}(undef,num_init)
histograms_volume = Vector{Vector{Histogram}}(undef,num_init)
fill_no_ref!(objs_area,Vector{Vector{Float64}}(undef,num_c_adj))
for i = 1:num_init
fill_no_ref!(objs_area[i],Float64[])
end
fill_no_ref!(objs_volume,Vector{Vector{Float64}}(undef,num_c_adj))
for i = 1:num_init
fill_no_ref!(objs_volume[i],Float64[])
end
fill_no_ref!(objs_area_sum,Vector{Float64}(undef,num_obj_area_sum))
fill_no_ref!(objs_volume_sum,Vector{Float64}(undef,num_obj_volume_sum))
fill_no_ref!(histograms_area,Vector{Histogram}(undef,num_dist_area))
fill_no_ref!(histograms_volume,Vector{Histogram}(undef,num_dist_volume))
tasks = Vector{Task}(undef,0)
data_taken = Threads.Atomic{Bool}(true)
for _ = 1:num_batch
while true
if isready(data_channel) && data_taken[]==true
Threads.atomic_xchg!(data_taken, false)
break
else
# Stop if asked
#=if check_abort_signal(channels.application_modifiers)
return nothing
end=#
sleep(0.1)
end
end
t = Threads.@spawn run_iteration(classes,output_options,savepath,filenames_batch,num_c,
num_border,labels_color,labels_incl,apply_border,border,objs_area,objs_volume,img_ext_string,
img_ext,scaling,apply_by_file,data_taken,data_channel,channels)
push!(tasks,t)
end
while length(tasks)!=num_batch
sleep(1)
end
while !all(istaskdone.(tasks))
sleep(1)
end
if num_obj_area_sum>0
for i = 1:num_init
cnt = 1
offset = 0
for j = 1:num_c
if output_options[j].Area.obj_area_sum
objs_area_sum[i][cnt] = sum(objs_area[i][j+offset])
cnt+=1
end
l_parents = length(labels_incl[j])
offset += l_parents
end
end
end
if num_obj_volume_sum>0
for i = 1:num_init
cnt = 1
offset = 0
for j = 1:num_c
if output_options[j].Volume.obj_volume_sum
objs_volume_sum[i][cnt] = sum(objs_volume[i][j])
cnt+=1
end
l_parents = length(labels_incl[j])
offset += l_parents
end
end
end
data_to_histograms(histograms_area,histograms_volume,objs_area,objs_volume,
output_options,num_init,num_c,labels_incl)
# Export data
if apply_by_file
filenames = reduce(vcat,filenames_batch)
else
filenames = [folder]
end
export_histograms(histograms_area,histograms_volume,classes,num_init,num_dist_area,
num_dist_volume,log_area_dist,log_volume_dist,savepath,filenames,data_ext_string,data_ext)
export_objs("Objects",objs_area,objs_volume,classes,num_init,num_obj_area,num_obj_volume,
log_area_obj,log_volume_obj,labels_incl,savepath,filenames,data_ext_string,data_ext)
export_objs("Objects sum",objs_area_sum,objs_volume_sum,classes,num_init,num_obj_area_sum,num_obj_volume_sum,
log_area_obj_sum,log_volume_obj_sum,labels_incl,savepath,filenames,data_ext_string,data_ext)
put!(channels.application_progress,1)
end
return nothing
end
function get_output_info(classes::Vector{ImageClassificationClass},output_options::Vector{ImageClassificationOutputOptions})
return classes,()
end
function get_output_info(classes::Vector{ImageRegressionClass},output_options::Vector{ImageRegressionOutputOptions})
return classes,()
end
function get_output_info(classes::Vector{ImageSegmentationClass},output_options::Vector{ImageSegmentationOutputOptions})
class_inds,labels_color,labels_incl,border = get_class_data(classes)
classes = classes[class_inds]
labels_color = labels_color[class_inds]
labels_incl = labels_incl[class_inds]
num_parents = sum(length.(labels_incl))
num_border = sum(border)
apply_border = num_border>0
log_area_obj = map(x->x.Area.obj_area,output_options)
log_area_obj_sum = map(x->x.Area.obj_area_sum,output_options)
log_area_dist = map(x->x.Area.area_distribution,output_options)
log_volume_obj = map(x->x.Volume.obj_volume,output_options)
log_volume_obj_sum = map(x->x.Volume.obj_volume_sum,output_options)
log_volume_dist = map(x->x.Volume.volume_distribution,output_options)
num_obj_area = count(log_area_obj) + num_parents
num_obj_area_sum = count(log_area_obj_sum)
num_dist_area = count(log_area_dist)
num_obj_volume = count(log_volume_obj) + num_parents
num_obj_volume_sum = count(log_volume_obj_sum)
num_dist_volume = count(log_volume_dist)
return classes,(num_border,labels_color,labels_incl,apply_border,border,
log_area_obj,log_area_obj_sum,log_area_dist,log_volume_obj,
log_volume_obj_sum,log_volume_dist,num_obj_area,num_obj_area_sum,
num_dist_area,num_obj_volume,num_obj_volume_sum,num_dist_volume)
end
function fix_output_options(model_data)
if problem_type()==:classification
model_data.output_options = ImageClassificationOutputOptions[]
elseif problem_type()==:regression
model_data.output_options = ImageRegressionOutputOptions[]
end
return nothing
end
function get_data_ext(ext::Symbol)
exts = [:csv,:xlsx,:json,:bson]
ind = findfirst(exts.==ext)
ext_string = [".csv",".xlsx",".json",".bson"]
return ext,ext_string[ind]
end
function get_image_ext(ext::Symbol)
exts = [:png,:tiff,:bson]
ind = findfirst(exts.==ext)
ext_string = [".png",".tiff",".bson"]
return ext,ext_string[ind]
end
# Main function that performs application
function apply_main(model_data::ModelData,all_data::AllData,options::Options,channels::Channels)
# Initialize constants
application_data = all_data.ApplicationData
application_options = options.ApplicationOptions
classes = model_data.classes
fix_output_options(model_data)
output_options = model_data.output_options
use_GPU = false
if options.GlobalOptions.HardwareResources.allow_GPU
if has_cuda()
use_GPU = true
else
@warn "No CUDA capable device was detected. Using CPU instead."
end
end
scaling = application_options.scaling
batch_size = 1
apply_by_file = application_options.apply_by==:file
if problem_type()==:classification
T = Vector{Int64}
elseif problem_type()==:regression
T = Vector{Float32}
elseif problem_type()==:segmentation
T = BitArray{4}
end
data_channel = Channel{Tuple{Int64,T}}(Inf)
# Get file extensions
img_ext,img_ext_string = get_image_ext(application_options.image_type)
data_ext,data_ext_string = get_data_ext(application_options.data_type)
# Get folders and names
folders = application_data.folders
num = length(folders)
urls = application_data.input_urls
urls_batched,filenames_batched = batch_urls_filenames(urls,batch_size)
# Get savepath directory
savepath_main = application_options.savepath
if isempty(savepath_main)
savepath_main = string(pwd(),"/Output data/")
end
# Make savepath directory if does not exist
mkpath(savepath_main)
# Send number of iterations
put!(channels.application_progress,num+sum(length.(urls_batched)))
# Output information
classes,output_info = get_output_info(classes,output_options)
# Prepare output
if problem_type()==:segmentation
num_slices_val = options.GlobalOptions.HardwareResources.num_slices
offset_val = options.GlobalOptions.HardwareResources.offset
else
num_slices_val = 1
offset_val = 0
end
normalization = model_data.normalization
norm_func(x) = normalization.f(x,normalization.args...)
t = Threads.@spawn get_output(norm_func,classes,num,urls_batched,model_data,
num_slices_val,offset_val,use_GPU,data_channel,channels)
push!(application_data.tasks,t)
# Process output and save data
process_output(classes,output_options,savepath_main,folders,filenames_batched,num,output_info...,
img_ext_string,img_ext,data_ext_string,data_ext,scaling,apply_by_file,data_channel,channels)
return nothing
end
function apply_main2(model_data::ModelData,all_data::AllData,options::Options,channels::Channels)
t = Threads.@spawn apply_main(model_data,all_data,options,channels)
push!(application_data.tasks,t)
return t
end
#---Histogram and objects related functions
function objects_area(components_vector::Vector{Array{Int64,2}},labels_incl::Vector{Vector{Int64}},
scaling::Float64,l::Int64)
components = components_vector[l]
scaling = scaling^2
parent_inds = labels_incl[l]
area = component_lengths(components)[2:end]
areas_out = [convert(Vector{Float64},area)./scaling]
if !isempty(parent_inds)
num_parents = length(parent_inds)
for i=1:num_parents
num_components = length(area)
components_parent = components_vector[parent_inds[i]]
obj_parent_inds = Vector{Int64}(undef,num_components)
for j=1:num_components
ind_bool = components.==j
obj_parent_inds[j] = maximum(components_parent[ind_bool])
end
push!(areas_out,obj_parent_inds)
end
end
return areas_out
end
# Makes a 3D representation of a 2D object based on optimising circularity
function func2D_to_3D(objects_mask::BitArray{2})
D = Float32.(distance_transform(feature_transform((!).(objects_mask))))
w = zeros(Float32,(size(D)...,8))
inds = vcat(1:4,6:9)
for i = 1:8
u = zeros(Float32,(9,1))
u[inds[i]] = 1
u = reshape(u,(3,3))
w[:,:,i] = imfilter(D,centered(u))
end
pks = all(D.>=w,dims=3)[:,:] .& objects_mask
mask2 = BitArray(undef,size(objects_mask))
fill!(mask2,true)
mask2[pks] .= false
D2 = Float32.(distance_transform(feature_transform((!).(mask2))))
D2[(!).(objects_mask)] .= 0
mask_out = sqrt.((D+D2).^2-D2.^2)
return mask_out
end
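# Rough usage sketch (illustrative values, not from the package tests): for a
# filled disk mask the heights are approximately hemispherical, so doubling the
# sum approximates the volume of a sphere cap, which is how objects_volume
# below uses this function.
#
#   mask = BitArray([(x-50)^2 + (y-50)^2 <= 30^2 for x in 1:100, y in 1:100])
#   heights = func2D_to_3D(mask) # per-pixel height estimates
#   volume_px = 2*sum(heights)   # cf. objects_volume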
function objects_volume(objects_mask::BitArray{2},components_vector::Vector{Array{Int64,2}},
labels_incl::Vector{Vector{Int64}},scaling::Float64,l::Int64)
components = components_vector[l]
volume_model = func2D_to_3D(objects_mask)
scaling = scaling^3
num_components = maximum(components)
parent_inds = labels_incl[l]
volumes_out_temp = Vector{Float64}(undef,num_components)
for i = 1:num_components
logical_inds = components.==i
pixels = volume_model[logical_inds]
volumes_out_temp[i] = 2*sum(pixels)/scaling
end
volumes_out = [volumes_out_temp]
if !isempty(parent_inds)
num_parents = length(parent_inds)
for i=1:num_parents
components_parent = components_vector[parent_inds[i]]
obj_parent_inds = Vector{Int64}(undef,num_components)
for j=1:num_components
ind_bool = components.==j
obj_parent_inds[j] = maximum(components_parent[ind_bool])
end
push!(volumes_out,obj_parent_inds)
end
end
return volumes_out
end
function data_to_histograms(histograms_area::Vector{Vector{Histogram}},
histograms_volume::Vector{Vector{Histogram}},
objs_area::Vector{Vector{Vector{Float64}}},
objs_volume::Array{Vector{Vector{Float64}}},
output_options::Vector{ImageSegmentationOutputOptions},num_batch::Int64,
num_c::Int64,labels_incl::Vector{Vector{Int64}})
for i = 1:num_batch
temp_histograms_area = histograms_area[i]
temp_histograms_volume = histograms_volume[i]
offset = 0
for l = 1:num_c
current_options = output_options[l]
area_dist_cond = current_options.Area.area_distribution
volume_dist_cond = current_options.Volume.volume_distribution
if area_dist_cond
area_options = current_options.Area
area_values = objs_area[i][l+offset]
if isempty(area_values)
@warn "No objects to export for area."
else
temp_histograms_area[l] = make_histogram(area_values,area_options)
end
end
if volume_dist_cond
volume_options = current_options.Volume
volume_values = objs_volume[i][l+offset]
if isempty(volume_values)
@warn "No objects to export for volume."
else
temp_histograms_volume[l] = make_histogram(volume_values,volume_options)
end
end
l_parents = length(labels_incl[l])
offset += l_parents
end
end
return nothing
end
function mask_to_data(objs_area::Vector{Vector{Vector{Float64}}},
objs_volume::Vector{Vector{Vector{Float64}}},cnt::Int64,mask::BitArray{3},
output_options::Vector{ImageSegmentationOutputOptions},
labels_incl::Vector{Vector{Int64}},border::Vector{Bool},num_c::Int64,
num_border::Int64,scaling::Float64)
temp_objs_area = objs_area[cnt]
temp_objs_volume = objs_volume[cnt]
components_vector = Vector{Array{Int64,2}}(undef,num_c)
for l = 1:num_c
ind = l
if border[l]==true
ind = l + num_border + num_c
end
mask_current = mask[:,:,ind]
components = label_components(mask_current,conn(4))
components_vector[l] = components
end
cnt = 1
for l = 1:num_c
current_options = output_options[l]
area_dist_cond = current_options.Area.area_distribution
area_obj_cond = current_options.Area.obj_area
area_sum_obj_cond = current_options.Area.obj_area_sum
volume_dist_cond = current_options.Volume.volume_distribution
volume_obj_cond = current_options.Volume.obj_volume
volume_sum_obj_cond = current_options.Volume.obj_volume_sum
ind = l
if border[l]==true
ind = l + num_border + num_c
end
cnt_add1 = -1
if area_dist_cond || area_obj_cond || area_sum_obj_cond
area_values = objects_area(components_vector,labels_incl,scaling,l)
if area_obj_cond || area_sum_obj_cond
for i = 1:length(area_values)
push!(temp_objs_area[cnt-1+i],area_values[i]...)
cnt_add1+=1
end
end
end
cnt_add2 = -1
if volume_dist_cond || volume_obj_cond || volume_sum_obj_cond
mask_current = mask[:,:,ind]
volume_values = objects_volume(mask_current,components_vector,labels_incl,scaling,l)
if volume_obj_cond || volume_sum_obj_cond
for i = 1:length(volume_values)
push!(temp_objs_volume[cnt-1+i],volume_values[i]...)
cnt_add2+=1
end
end
end
cnt = cnt + 1 + max(cnt_add1,cnt_add2,0)
end
return nothing
end
function make_histogram(values::Vector{<:Real}, options::Union{OutputArea,OutputVolume})
if options.binning==:auto
h = fit(Histogram, values)
elseif options.binning==:number_of_bins
maxval = maximum(values)
minval = minimum(values)
dif = maxval-minval
step = dif/(options.value-1)
bins = minval:step:maxval
h = fit(Histogram, values,bins)
else # options.binning==:bin_width
num = round(Int,maximum(values)/options.value) # nbins must be an integer
h = fit(Histogram, values, nbins=num)
end
h = normalize(h, mode=options.normalization)
return h
end
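# Binning sketch (`opts` is hypothetical; its fields mirror OutputArea/OutputVolume):
#   opts.binning == :auto           -> let StatsBase pick the edges
#   opts.binning == :number_of_bins -> opts.value equally spaced bins
#   opts.binning == :bin_width      -> roughly maximum(values)/opts.value bins
# e.g. make_histogram([1.0,2.0,2.5,4.0], opts) with :number_of_bins and
# opts.value = 3 uses edges 1.0:1.5:4.0 before normalization.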
function histograms_to_dataframe(df::DataFrame,histograms::Vector{Histogram},
num::Int64,offset::Int64)
inds = 1:2:2*num
for j = 1:num
ws = histograms[j].weights
numel = length(ws)
edges = collect(histograms[j].edges[1])
edges = map(ind->mean([edges[ind],edges[ind+1]]),1:numel)
df[1:numel,inds[j]+offset] .= edges
df[1:numel,inds[j]+offset+1] .= ws
end
end
function objs_to_dataframe(df::DataFrame,objs::Vector{Vector{Float64}},
num::Int64,offset::Int64)
for j = 1:num
objs_current = objs[j]
numel = length(objs_current)
df[1:numel,j+offset] .= objs_current
end
end
function objs_to_dataframe(df::DataFrame,objs::Vector{Float64},
num::Int64,offset::Int64)
start = offset + 1
finish = num + start -1
df[1,start:finish] .= objs
end
function get_dataframe_dists_names(names::Vector{String},type::String,
log_dist::Vector{Bool})
names_x = String[]
inds = findall(log_dist)
for i in inds
name_current = names[i]
name_edges = string(name_current,"_",type,"_edges")
name_weights = string(name_current,"_",type,"_weights")
push!(names_x,name_edges,name_weights)
end
return names_x
end
function export_histograms(histograms_area::Vector{Vector{Histogram}},
histograms_volume::Vector{Vector{Histogram}},classes::Vector{ImageSegmentationClass},
num::Int64,num_dist_area::Int64,num_dist_volume::Int64,log_area_dist::Vector{Bool},
log_volume_dist::Vector{Bool},savepath::String,filenames::Vector{String},
data_ext_string::String,data_ext::Symbol)
num_cols_dist = num_dist_area + num_dist_volume
if num_cols_dist==0
return nothing
end
for i = 1:num
if !isassigned(histograms_area,i)
continue
end
num_cols_dist = num_dist_area + num_dist_volume
if num_dist_area>0
num_rows_area = maximum(map(x->length(x.weights),histograms_area[i]))
else
num_rows_area = 0
end
if num_dist_volume>0
num_rows_volume = maximum(map(x->length(x.weights),histograms_volume[i]))
else
num_rows_volume = 0
end
num_rows = max(num_rows_area,num_rows_volume)
histogram_area = histograms_area[i]
histogram_volume = histograms_volume[i]
rows = Vector{Union{Float64,String}}(undef,num_rows)
fill!(rows,"")
df_dists = DataFrame(repeat(rows,1,2*num_cols_dist), :auto)
histograms_to_dataframe(df_dists,histogram_area,num_dist_area,0)
offset = 2*num_dist_area
histograms_to_dataframe(df_dists,histogram_volume,num_dist_volume,offset)
names = map(x->x.name,classes)
names_area = get_dataframe_dists_names(names,"area",log_area_dist)
names_volume = get_dataframe_dists_names(names,"volume",log_volume_dist)
names_all = vcat(names_area,names_volume)
rename!(df_dists, Symbol.(names_all))
fname = filenames[i]
name = string("Distributions ",fname,data_ext_string)
save(df_dists,savepath,name,data_ext)
end
return nothing
end
function get_dataframe_objs_names(names::Vector{String},type::String,
labels_incl::Vector{Vector{Int64}},log_obj::Vector{Bool})
names_x = String[]
inds = findall(log_obj)
for i in inds
name_current = names[i]
name = string(name_current,"_",type)
push!(names_x,name)
parents = labels_incl[i]
num_parents = length(parents)
for j=1:num_parents
parent = names[parents[j]]
name_parent_ind = string(name,"_",parent,"_index")
push!(names_x,name_parent_ind)
end
end
return names_x
end
function get_dataframe_objs_sum_names(names::Vector{String},type::String,
log_obj::Vector{Bool})
names_x = String[]
inds = findall(log_obj)
for i in inds
name_current = names[i]
name = string(name_current,"_",type)
push!(names_x,name)
end
return names_x
end
function export_objs(type_name::String,objs_area::Vector,
objs_volume::Vector,classes::Vector{ImageSegmentationClass},
num::Int64,num_obj_area::Int64,num_obj_volume::Int64,log_area_obj::Vector{Bool},
log_volume_obj::Vector{Bool},labels_incl::Vector{Vector{Int64}},savepath::String,
filenames::Vector{String},data_ext_string::String,data_ext::Symbol)
num_cols_obj = num_obj_area + num_obj_volume
if num_cols_obj==0
return nothing
end
for i = 1:num
if num_obj_area>0
num_rows_area = maximum(map(x->length(x),objs_area[i]))
else
num_rows_area = 0
end
if num_obj_volume>0
num_rows_volume = maximum(map(x->length(x),objs_volume[i]))
else
num_rows_volume = 0
end
num_rows = max(num_rows_area,num_rows_volume)
obj_area = objs_area[i]
obj_volume = objs_volume[i]
rows = Vector{Union{Float64,String}}(undef,num_rows)
fill!(rows,"")
df_objs = DataFrame(repeat(rows,1,num_cols_obj), :auto)
objs_to_dataframe(df_objs,obj_area,num_obj_area,0)
offset = num_obj_area
objs_to_dataframe(df_objs,obj_volume,num_obj_volume,offset)
names = map(x->x.name,classes)
if type_name=="Objects"
names_area = get_dataframe_objs_names(names,"area",labels_incl,log_area_obj)
names_volume = get_dataframe_objs_names(names,"volume",labels_incl,log_volume_obj)
else
names_area = get_dataframe_objs_sum_names(names,"area",log_area_obj)
names_volume = get_dataframe_objs_sum_names(names,"volume",log_volume_obj)
end
names_all = vcat(names_area,names_volume)
rename!(df_objs, Symbol.(names_all))
fname = filenames[i]
name = string(type_name," ",fname,data_ext_string)
save(df_objs,savepath,name,data_ext)
end
return nothing
end
#---Image related functions
function get_save_image_info(num_dims::Int64,classes::Vector{ImageSegmentationClass},
output_options::Vector{ImageSegmentationOutputOptions},border::Vector{Bool})
num_c = length(border)
num_border = sum(border)
logical_inds = falses(num_dims) # an undef BitArray is uninitialized; unselected entries must be false
img_names = Vector{String}(undef,num_c+num_border*2)
for i = 1:num_c
class = classes[i]
class_name = class.name
if output_options[i].Mask.mask
logical_inds[i] = true
img_names[i] = class_name
end
if class.BorderClass.enabled
if output_options[i].Mask.mask_border
ind = i + num_c
logical_inds[ind] = true
img_names[ind] = string(class_name," (border)")
end
if output_options[i].Mask.mask_applied_border
ind = num_c + num_border + i
logical_inds[ind] = true
img_names[ind] = string(class_name," (applied border)")
end
end
end
inds = findall(logical_inds)
return inds,img_names
end
function mask_to_img(mask::BitArray{3},classes::Vector{ImageSegmentationClass},
output_options::Vector{ImageSegmentationOutputOptions},
labels_color::Vector{Vector{Float64}},border::Vector{Bool},
savepath::String,filename::String,ext::String,sym_ext::Symbol)
num_dims = size(mask)[3]
inds,img_names = get_save_image_info(num_dims,classes,output_options,border)
if isempty(inds)
return nothing
end
border_colors = labels_color[findall(border)]
labels_color = vcat(labels_color,border_colors,border_colors)
perm_labels_color64 = map(x -> permutedims(x[:,:,:]/255,[3,2,1]),labels_color)
perm_labels_color = convert(Array{Array{Float32,3}},perm_labels_color64)
path = joinpath(savepath,filename)
if !isdir(path)
mkdir(path)
end
for j = 1:length(inds)
ind = inds[j]
mask_current = mask[:,:,ind]
color = perm_labels_color[ind]
mask_float = convert(Array{Float32,2},mask_current)
mask_dim3 = cat(mask_float,mask_float,mask_float,dims=Val(3))
mask_dim3 = mask_dim3.*color
mask_dim3 = cat(mask_dim3,mask_float,dims=Val(3))
mask_dim3 = permutedims(mask_dim3,[3,1,2])
mask_RGB = colorview(RGBA,mask_dim3)
img_name = img_names[ind]
name = string(img_name," ",filename,ext)
save(mask_RGB,path,name,sym_ext)
end
return nothing
end
function save(data,path::String,name::String,ext::Symbol)
if !isdir(path)
dirs = splitpath(path)
start = max(length(dirs) - 3, 1) # clamp so shallow paths do not index dirs[1:0]
for i=start:length(dirs)
temp_path = join(dirs[1:i],'/')
if !isdir(temp_path)
mkdir(temp_path)
end
end
end
url = joinpath(path,name)
if isfile(url)
rm(url)
end
if ext==:json
open(url,"w") do f
JSON_pkg.print(f,data)
end
elseif ext==:bson
BSON.@save(url,data)
elseif ext==:xlsx
XLSX.writetable(url, collect(DataFrames.eachcol(data)), DataFrames.names(data))
else
FileIO.save(url,data)
end
end
module Classes
# Import packages
using
# EasyML ecosystem
..Common, ..Common.Classes
# Include functions
include(string(common_dir(),"/common/classes_design.jl"))
include("main.jl")
include("exported_functions.jl")
export change_classes
end
"""
change_classes()
Opens a GUI for addition or changing of classes.
"""
function change_classes()
classes = model_data.classes
if length(classes)==0
ids = [0]
JindTree = -1
else
ids = 1:length(classes)
JindTree = 0
end
@qmlfunction(
# Classes
get_class_field,
num_classes,
append_classes,
reset_classes,
# Problem
get_problem_type,
set_problem_type,
# Options
get_options,
# Other
unit_test
)
path_qml = string(@__DIR__,"/gui/ClassDialog.qml")
gui_dir = string("file:///",replace(@__DIR__, "\\" => "/"),"/gui/")
text = add_templates(path_qml)
loadqml(QByteArray(text), gui_dir = gui_dir, JindTree = JindTree, ids = ids)
exec()
return nothing
end
#---make_classes functions---------------------------------------
function reset_classes_main(model_data)
if problem_type()==:classification
model_data.classes = Vector{ImageClassificationClass}(undef,0)
elseif problem_type()==:regression
model_data.classes = Vector{ImageRegressionClass}(undef,0)
elseif problem_type()==:segmentation
model_data.classes = Vector{ImageSegmentationClass}(undef,0)
end
return nothing
end
reset_classes() = reset_classes_main(model_data::ModelData)
function append_classes_main(model_data::ModelData,data)
data = fix_QML_types(data)
type = eltype(model_data.classes)
if problem_type()==:classification
class = ImageClassificationClass()
class.name = data[1]
elseif problem_type()==:regression
class = ImageRegressionClass()
class.name = data[1]
elseif problem_type()==:segmentation
class = ImageSegmentationClass()
class.name = String(data[1])
class.color = Int64.([data[2],data[3],data[4]])
class.parents = data[5]
class.overlap = Bool(data[6])
class.min_area = Int64(data[7])
class.BorderClass.enabled = Bool(data[8])
class.BorderClass.thickness = Int64(data[9])
end
push!(model_data.classes,class)
return nothing
end
append_classes(data) = append_classes_main(model_data,data)
function num_classes_main(model_data::ModelData)
return length(model_data.classes)
end
num_classes() = num_classes_main(model_data::ModelData)
function get_class_main(model_data::ModelData,index,fieldname)
fieldname = fix_QML_types(fieldname)
index = Int64(index)
if fieldname isa Vector
fieldnames = Symbol.(fieldname)
data = model_data.classes[index]
for field in fieldnames
data = getproperty(data,field)
end
return data
else
return getproperty(model_data.classes[index],Symbol(fieldname))
end
end
get_class_field(index,fieldname) = get_class_main(model_data,index,fieldname)
function get_class_data(classes::Vector{ImageSegmentationClass})
num = length(classes)
class_names = Vector{String}(undef,num)
class_parents = Vector{Vector{String}}(undef,num)
labels_color = Vector{Vector{Float64}}(undef,num)
labels_incl = Vector{Vector{Int64}}(undef,num)
for i=1:num
class = classes[i]
class_names[i] = classes[i].name
class_parents[i] = classes[i].parents
labels_color[i] = class.color
end
for i=1:num
labels_incl[i] = findall(any.(map(x->x.==class_parents[i],class_names)))
end
class_inds = Vector{Int64}(undef,0)
for i = 1:num
if !classes[i].overlap
push!(class_inds,i)
end
end
num = length(class_inds)
border = Vector{Bool}(undef,num)
border_thickness = Vector{Int64}(undef,num)
# enumerate keeps the outputs aligned with the filtered (non-overlap) classes;
# indexing by the class index itself can run past `num` when classes are skipped
for (j,i) in enumerate(class_inds)
    class = classes[i]
    border[j] = class.BorderClass.enabled
    border_thickness[j] = class.BorderClass.thickness
end
return class_inds,labels_color,labels_incl,border,border_thickness
end
module Common
# Include dependencies
using
# Interfacing
QML, Qt5QuickControls2_jll,
# Data structuring
Parameters, Dates,
# Data import/export
BSON,
# Image manipulation
Images, ColorTypes, ImageFiltering, ImageTransformations,
ImageMorphology, DSP, ImageMorphology.FeatureTransform, ImageSegmentation,
# Maths
Statistics,
# Machine learning
Flux, Flux.Losses, FluxExtra,
# Other
FLoops
include("misc.jl")
# Include modules
include("data structures/Classes.jl")
include("data structures/Design.jl")
include("data structures/DataPreparation.jl")
include("data structures/Application.jl")
include("data structures/Training.jl")
include("data structures/Validation.jl")
using .Classes, .Design.Layers
import .Design: DesignData, design_data, DesignOptions, design_options
import .DataPreparation: PreparationData, preparation_data, DataPreparationOptions, data_preparation_options
import .Training: TrainingData, TestingData, training_data, testing_data, TrainingOptions, training_options
import .Validation: ValidationData, validation_data, ValidationOptions, validation_options
import .Application: AbstractOutputOptions, ImageClassificationOutputOptions, ImageRegressionOutputOptions,
ImageSegmentationOutputOptions, ApplicationData, application_data, ApplicationOptions, application_options
# Include data structures and functions
include("data_structures.jl")
include("functions.jl")
include("image_processing.jl")
# Struct to Dict interconversion
export struct_to_dict!, dict_to_struct!, to_struct!
# Model data
export model_data, ModelData, AbstractModel, set_savepath, save_model, load_model,
ImageClassificationClass, ImageRegressionClass, BorderClass, ImageSegmentationClass, none
# Options
export change, options, Options, global_options, GlobalOptions, save_options, load_options
# GUI data handling
export fix_QML_types, get_data, get_options, set_data, set_options, get_file, get_folder
# Channels
export channels, Channels, check_progress, get_progress, empty_channel, put_channel
# Other
export all_data, AllData, problem_type, input_type, check_task, unit_test, common_dir, add_templates, setproperty!
# Image processing
export dilate!, erode!, closing!, areaopen!, outer_perim, rotate_img, conn, conn,
component_intensity, segment_objects, allequal, alldim
# QML functions
export QML, @qmlfunction, QByteArray, loadqml, exec
# Machine learning
export Flux, Losses, FluxExtra, Normalizations, NNlib
end
#---Channels------------------------------------------------------------------
@with_kw struct Channels
data_preparation_progress::Channel = Channel{Int64}(Inf)
training_start_progress::Channel = Channel{NTuple{3,Int64}}(1)
training_progress::Channel = Channel{Tuple{String,Float32,Float32,Int64}}(Inf)
training_modifiers::Channel = Channel{Tuple{Int64,Float64}}(Inf) # 0 - abort; 1 - learning rate; 2 - epochs; 3 - number of tests
validation_start::Channel{Int64} = Channel{Int64}(1)
validation_progress::Channel{NTuple{2,Float32}} = Channel{NTuple{2,Float32}}(Inf)
validation_modifiers::Channel{Tuple{Int64,Float64}} = Channel{Tuple{Int64,Float64}}(Inf) # 0 - abort
application_progress::Channel = Channel{Int64}(Inf)
end
channels = Channels()
#---Model data----------------------------------------------------------------
@with_kw mutable struct ModelData
model::AbstractModel = Flux.Chain()
normalization::Normalization = Normalization(none,())
loss::Function = Flux.Losses.mse
input_size::Union{Tuple{Int64},NTuple{3,Int64}} = (0,)
output_size::Union{Tuple{Int64},NTuple{3,Int64}} = (0,)
problem_type::Symbol = :classification
input_type::Symbol = :image
input_properties::Vector{Symbol} = Vector{Symbol}(undef,0)
classes::Vector{<:AbstractClass} = Vector{ImageClassificationClass}(undef,0)
output_options::Vector{<:AbstractOutputOptions} = Vector{ImageClassificationOutputOptions}(undef,0)
layers_info::Vector{AbstractLayerInfo} = Vector{AbstractLayerInfo}(undef,0)
end
model_data = ModelData()
function Base.setproperty!(obj::ModelData,k::Symbol,value::Union{Symbol,Vector{Symbol}})
if k==:problem_type
syms = (:classification,:regression,:segmentation)
check_setfield!(obj,k,value,syms)
elseif k==:input_type
syms = (:image,)
check_setfield!(obj,k,value,syms)
elseif k==:input_properties
syms = (:grayscale,)
check_setfield!(obj,k,value,syms)
end
return nothing
end
#---Data-------------------------------------------------------------------
@with_kw mutable struct AllDataUrls
model_url::String = ""
model_name::String = ""
end
all_data_urls = AllDataUrls()
@with_kw mutable struct AllData
DesignData::DesignData = design_data
PreparationData::PreparationData = preparation_data
TrainingData::TrainingData = training_data
TestingData::TestingData = testing_data
ValidationData::ValidationData = validation_data
ApplicationData::ApplicationData = application_data
Urls::AllDataUrls = all_data_urls
end
all_data = AllData()
#---Options-----------------------------------------------------------------
# Global options
@with_kw mutable struct Graphics
scaling_factor::Float64 = 1.0
end
graphics = Graphics()
@with_kw mutable struct HardwareResources
allow_GPU::Bool = true
num_threads::Int64 = Threads.nthreads()
num_slices::Int64 = 1
offset::Int64 = 20
end
hardware_resources = HardwareResources()
@with_kw struct GlobalOptions
Graphics::Graphics = graphics
HardwareResources::HardwareResources = hardware_resources
end
global_options = GlobalOptions()
# All options
@with_kw struct Options
GlobalOptions::GlobalOptions = global_options
DesignOptions::DesignOptions = design_options
DataPreparationOptions::DataPreparationOptions = data_preparation_options
TrainingOptions::TrainingOptions = training_options
ValidationOptions::ValidationOptions = validation_options
ApplicationOptions::ApplicationOptions = application_options
end
options = Options()
#---Testing--------------------------------------------------------------------
@with_kw mutable struct UnitTest
state = false
urls = String[]
url_pusher = () -> popfirst!(urls)
end
unit_test = UnitTest()
(m::UnitTest)() = m.state
#---Model_data------------------------------------------------------
problem_type() = model_data.problem_type
input_type() = model_data.input_type
#---Model saving/loading--------------------------------------------
"""
set_savepath(url::String)
Sets a path where a model will be saved.
"""
function set_savepath(url::String)
url_split = split(url,('\\','/','.'))
if isempty(url_split) || url_split[end]!="model" || length(url_split)<2
@error "The model name should end with a '.model' extension."
return nothing
end
all_data.Urls.model_url = url
all_data.Urls.model_name = url_split[end-1]
return nothing
end
"""
save_model(url::String)
Saves a model to a specified URL. The URL can be absolute or relative.
Use '.model' extension.
"""
function save_model(url::AbstractString)
url = fix_QML_types(url)
# Make folders if needed
if '\\' in url || '/' in url
url_split = split(url,('/','\\'))[1:end-1]
url_dir = reduce((x,y) -> join([x,y],'/'),url_split)
mkpath(url_dir)
end
# Serialize and save model
dict_raw = Dict{Symbol,Any}()
struct_to_dict!(dict_raw,model_data)
dict = Dict{Symbol,IOBuffer}()
ks = keys(dict_raw)
vs = values(dict_raw)
for (k,v) in zip(ks,vs)
buffer = IOBuffer()
d = Dict(:field => v)
BSON.bson(buffer,d)
dict[k] = buffer
end
BSON.@save(url,dict)
return nothing
end
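# Usage sketch (hypothetical path): save_model("models/cells.model") creates
# the "models" folder if needed and writes one BSON buffer per ModelData field,
# so a single problematic field does not prevent the rest from loading later.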
"""
save_model()
Opens a file dialog where you can select where to save a model and how it should be called.
"""
function save_model()
name_filters = ["*.model"]
if isempty(all_data_urls.model_name)
all_data_urls.model_name = "new_model"
end
filename = string(all_data_urls.model_name,".model")
url_out = String[""]
observe(url) = url_out[1] = url
# Launch GUI
@qmlfunction(observe,unit_test)
path_qml = string(@__DIR__,"/gui/UniversalSaveFileDialog.qml")
text = add_templates(path_qml)
loadqml(QByteArray(text),name_filters = name_filters,
filename = filename)
exec()
if unit_test()
url_out[1] = unit_test.url_pusher()
end
if !isempty(url_out[1])
save_model(url_out[1])
end
return nothing
end
"""
load_model(url::String)
Loads a model from a specified URL. The URL can be absolute or relative.
"""
function load_model(url::AbstractString)
url = fix_QML_types(url)
if isfile(url)
loaded_data = BSON.load(url)[:dict]
else
error(string(url, " does not exist."))
end
fnames = fieldnames(ModelData)
ks = collect(keys(loaded_data))
ks = intersect(ks,fnames)
if loaded_data[ks[1]] isa IOBuffer
for k in ks
try
serialized = seekstart(loaded_data[k])
deserialized = BSON.load(serialized)[:field]
if deserialized isa Dict
dict_to_struct!(model_data.normalization,deserialized)
elseif deserialized isa NamedTuple
to_struct!(model_data,k,deserialized)
elseif deserialized isa Vector
type = typeof(getproperty(model_data,k))
deserialized_typed = convert(type,deserialized)
setproperty!(model_data,k,deserialized_typed)
else
setproperty!(model_data,k,deserialized)
end
catch e
@warn string("Loading of ",k," failed.") exception=(e, catch_backtrace())
end
end
else
# EasyML v0.1 partial compatibility
dict_to_struct!(model_data,loaded_data)
end
all_data_urls.model_url = url
url_split = split(url,('/','.'))
all_data_urls.model_name = url_split[end-1]
return nothing
end
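# Round-trip sketch (hypothetical path):
#   save_model("models/cells.model")
#   load_model("models/cells.model") # repopulates the global `model_data`
# Fields that fail to deserialize are skipped with a warning instead of
# aborting the whole load.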
"""
load_model()
Opens a file dialog where you can select a model to be loaded and loads it.
"""
function load_model()
name_filters = ["*.model"]
url_out = String[""]
observe(url) = url_out[1] = url
# Launch GUI
@qmlfunction(observe,unit_test)
path_qml = string(@__DIR__,"/gui/UniversalFileDialog.qml")
text = add_templates(path_qml)
loadqml(QByteArray(text),name_filters = name_filters)
exec()
if unit_test()
url_out[1] = unit_test.url_pusher()
end
# Load model
if !isempty(url_out[1])
load_model(url_out[1])
end
return nothing
end
#---Options saving/loading--------------------------------------------------
"""
change(global_options::GlobalOptions)
Allows to change `global_options` in a GUI.
"""
function change(data::GlobalOptions)
@qmlfunction(
max_num_threads,
get_options,
set_options,
save_options,
# Other
unit_test
)
path_qml = string(@__DIR__,"/gui/GlobalOptions.qml")
text = add_templates(path_qml)
loadqml(QByteArray(text))
exec()
return nothing
end
function save_options_main(options)
dict = Dict{Symbol,Any}()
struct_to_dict!(dict,options)
BSON.@save("options.bson",dict)
return nothing
end
"""
save_options()
Saves options to `options.bson`. Uses present working directory.
It is run automatically after changing options in a GUI window.
"""
save_options() = save_options_main(options)
function load_options_main(options)
if isfile("options.bson")
try
data = BSON.load("options.bson")
dict_to_struct!(options,data[:dict])
catch e
@error string("Options were not loaded. Error: ",e)
save_options()
end
else
save_options()
end
return nothing
end
"""
load_options()
Loads options from your previous run which are located in `options.bson`.
Uses present working directory. It is run automatically after loading the package.
"""
load_options() = load_options_main(options)
#---GUI-------------------------------------------------------------------
function QML.loadqml(text::QByteArray; kwargs...)
qml_engine = init_qmlengine()
ctx = root_context(QML.CxxRef(qml_engine))
for (key,value) in kwargs
set_context_property(ctx, String(key), value)
end
component = QQmlComponent(qml_engine)
QML.set_data(component, text, QUrl())
create(component, qmlcontext())
return component
end
#---GUI data handling-----------------------------------------------------
# Convert QML types to Julia types
function fix_QML_types(var)
if var isa AbstractString
return String(var)
elseif var isa Integer
return Int64(var)
elseif var isa AbstractFloat
return Float64(var)
elseif var isa QML.QListAllocated
return fix_QML_types.(QML.value.(var))
else
return var
end
end
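# e.g. fix_QML_types(Int32(5)) == 5 (an Int64), and a QML list of numbers
# becomes a plain Julia vector; values that are already Julia types pass
# through unchanged.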
# Allows to read data from GUI
function get_data_main(data,fields,inds)
fields::Vector{String} = fix_QML_types(fields)
inds = fix_QML_types(inds)
for i = 1:length(fields)
field = Symbol(fields[i])
data = getproperty(data,field)
end
if !(isempty(inds))
for i = 1:length(inds)
data = data[inds[i]]
end
end
if data isa Symbol
data = string(data)
end
return data
end
get_data(fields,inds=[]) = get_data_main(all_data,fields,inds)
get_options(fields,inds=[]) = get_data_main(options,fields,inds)
# Allows to write data from GUI
function set_data_main(data,fields,args)
fields::Vector{String} = fix_QML_types(fields)
field_end = Symbol(fields[end])
args = fix_QML_types(args)
for i = 1:length(fields)-1
field = Symbol(fields[i])
data = getproperty(data,field)
end
if length(args)==1
type = typeof(getproperty(data,field_end))
value = args[1]
if getproperty(data,field_end) isa Symbol
value = type(lowercase(value))
else
value = type(value)
end
elseif length(args)==2
inds = args[1]
value = getproperty(data,field_end)
value_temp = value
if !(isempty(inds))
for i = 1:(length(inds)-1)
value_temp = value_temp[inds[i]]
end
end
type = typeof(value_temp[inds[end]])
value_temp[inds[end]] = type(args[2])
end
setproperty!(data, field_end, value)
return nothing
end
set_data(fields,args...) = set_data_main(all_data,fields,args)
set_options(fields,args...) = set_data_main(options,fields,args)
function get_folder(dir = "")
url_out = String[""]
observe() = url_out[1]
@qmlfunction(observe,unit_test)
path_qml = string(@__DIR__,"/gui/UniversalFolderDialog.qml")
text = add_templates(path_qml)
loadqml(QByteArray(text),currentfolder = dir)
exec()
if unit_test()
url_out[1] = unit_test.url_pusher()
end
return url_out[1]
end
function get_file(dir = "", name_filters = [])
url_out = String[""]
observe() = url_out[1]
@qmlfunction(observe,unit_test)
path_qml = string(@__DIR__,"/gui/UniversalFileDialog.qml")
text = add_templates(path_qml)
loadqml(QByteArray(text),
currentfolder = dir,
name_filters = name_filters)
exec()
if unit_test()
url_out[1] = unit_test.url_pusher()
end
return url_out[1]
end
#---Handling channels---------------------------------------------------------
# Return a value from progress channels without taking the value
function check_progress_main(channels,field::AbstractString)
field::String = fix_QML_types(field)
field_sym = Symbol(field)
channel = getfield(channels,field_sym)
if isready(channel)
return fetch(channel)
else
return false
end
end
check_progress(field) = check_progress_main(channels,field)
# Return a value from progress channels by taking the value
function get_progress_main(channels,field::AbstractString)
field::String = fix_QML_types(field)
field_sym = Symbol(field)
channel = getfield(channels,field_sym)
if isready(channel)
value_raw = take!(channel)
if value_raw isa Tuple
value = [value_raw...]
else
value = value_raw
end
return value
else
return false
end
end
function get_progress_main(channels,field::Symbol)
channel = getfield(channels,field)
if isready(channel)
value_raw = take!(channel)
if value_raw isa Tuple
value = [value_raw...]
else
value = value_raw
end
return value
else
return false
end
end
get_progress(field) = get_progress_main(channels,field)
function empty_channel_main(channels,field::AbstractString)
field::String = fix_QML_types(field)
field_sym = Symbol(field)
channel = getfield(channels,field_sym)
while true
if isready(channel)
take!(channel)
else
return nothing
end
end
end
function empty_channel_main(channels,field::Symbol)
channel = getproperty(channels,field)
while true
if isready(channel)
take!(channel)
else
return
end
end
end
empty_channel(field) = empty_channel_main(channels,field)
function put_channel_main(channels,field::AbstractString,value)
field = fix_QML_types(field)
value = fix_QML_types(value)
field_sym = Symbol(field)
channel = getfield(channels,field_sym)
if value isa Vector
value_raw::Vector{Float64} = fix_QML_types(value)
value1 = convert(Int64,value_raw[1])
value2 = convert(Float64,value_raw[2])
value = (value1,value2)
end
put!(channel,value)
return nothing
end
put_channel(field,value) = put_channel_main(channels,field,value)
#---Struct related functions--------------------------------------------------
function struct_to_dict!(dict,obj)
ks = fieldnames(typeof(obj))
for k in ks
value = getproperty(obj,k)
type = typeof(value)
if occursin("EasyML",string(parentmodule(type))) && !(type<:Function)
dict_current = Dict{Symbol,Any}()
dict[k] = dict_current
struct_to_dict!(dict_current,value)
elseif value isa Vector && !isempty(value) && occursin("EasyML",string(parentmodule(eltype(type))))
types = typeof.(value)
dict_vec = Vector{Dict{Symbol,Any}}(undef,0)
for obj_for_vec in value
dict_for_vec = Dict{Symbol,Any}()
struct_to_dict!(dict_for_vec,obj_for_vec)
push!(dict_vec,dict_for_vec)
end
data_tuple = (vector_type = string(type), types = string(types), values = dict_vec)
dict[k] = data_tuple
else
dict[k] = value
end
end
return nothing
end
function fix_type(vector_type_string::String)
vector_type_string_split = split(vector_type_string,".")
if length(vector_type_string_split)==1
return vector_type_string
else
vector_type_string = join(vector_type_string_split[[1,end]],".")
vector_type_string = replace(vector_type_string, "EasyML."=>"")
return vector_type_string
end
end
function fix_type_layers_info(vector_type_string::String)
vector_type_string = replace(vector_type_string, "EasyML.Common.Design.Layers."=>"")
vector_type_string = replace(vector_type_string, "EasyML.Common.Application."=>"")
return vector_type_string
end
function to_struct!(obj,sym::Symbol,value::NamedTuple)
if !isempty(value)
vector_type_string = fix_type(getindex(value,:vector_type))
vector_type = eval(Meta.parse(vector_type_string))
types_string = fix_type_layers_info(getindex(value,:types))
types = eval.(Meta.parse.(types_string))
vals = getindex(value,:values)
struct_vec = vector_type(undef,0)
for j = 1:length(types)
obj_for_vec = types[j]()
dict_for_vec = vals[j]
dict_to_struct!(obj_for_vec,dict_for_vec)
push!(struct_vec,obj_for_vec)
end
setproperty!(obj,sym,struct_vec)
end
return nothing
end
function dict_to_struct!(obj,dict::Dict)
ks = [keys(dict)...]
for i = 1:length(ks)
ks_cur = ks[i]
sym = Symbol(ks_cur)
value = dict[ks_cur]
if hasproperty(obj,sym)
obj_property = getproperty(obj,sym)
if value isa Dict
dict_to_struct!(obj_property,value)
else
try
setproperty!(obj,sym,value)
catch e
@warn string("Loading of ",string(sym)," in ",string(obj)," failed.") exception=(e, catch_backtrace())
end
end
end
end
return nothing
end
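# Serialization round-trip sketch (any of the EasyML options structs works):
#   dict = Dict{Symbol,Any}()
#   struct_to_dict!(dict, options) # nested structs become nested Dicts
#   dict_to_struct!(options, dict) # fields are restored by name
# Vectors of EasyML structs are stored as (vector_type, types, values) tuples
# so that the concrete element types survive the trip through BSON.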
#---Other-------------------------------------------
function findline(lines::Vector{String})
for i = 1:min(20,length(lines))
line = lines[i]
if i>2
if occursin("import",line)
if occursin("templates",line)
return i
end
else
return i
end
end
end
    return 0 # signal "not found"; callers compare the result against 0
end
common_dir() = string(replace(@__DIR__, "\\" => "/"))
function add_templates(url::String)
f = open(url, read=true)
seekstart(f)
lines = readlines(f)
close(f)
ind = findline(lines)
if ind!=0
dir = string("file:///",string(common_dir(),"/gui/templates"))
templates_line = string("import ",'"',dir,'"')
lines[ind] = templates_line
end
text = join(lines,"\n")
return text
end
function max_num_threads()
return length(Sys.cpu_info())
end
num_threads() = hardware_resources.num_threads
function check_task(t::Task)
if istaskdone(t)
if t.:_isexception
return :error, t.:result
else
return :done, nothing
end
else
return :running, nothing
end
end
function Images.dilate!(array::BitArray{2},num::Int64)
for _ = 1:num
ImageMorphology.dilate!(array)
end
return(array)
end
function erode!(array::BitArray{2},num::Int64)
for _ = 1:num
ImageMorphology.erode!(array)
end
return(array)
end
function closing!(array::BitArray{2},num::Int64)
dilate!(array,num)
erode!(array,num)
return array
end
function outer_perim(array::BitArray{2})
array2 = copy(array)
dilate!(array2,1)
return xor.(array2,array)
end
function areaopen!(im::BitArray{2},area::Int64)
im_segm = label_components(im)
num = maximum(im_segm)
chunk_size = convert(Int64,round(num/num_threads()))
@floop ThreadedEx(basesize = chunk_size) for i=1:num
mask = im_segm.==i
if sum(mask)<area
im[mask] .= false
end
end
return
end
function replace_nan!(x)
type = eltype(x)
for i = eachindex(x)
if isnan(x[i])
x[i] = zero(type)
end
end
end
function rotate_img(img::AbstractArray{T,3},angle_val::Float64) where T<:AbstractFloat
if angle_val!=0
img_out = copy(img)
for i = 1:size(img,3)
slice = img[:,:,i]
temp = imrotate(slice,angle_val,axes(slice))
replace_nan!(temp)
img_out[:,:,i] = convert.(T,temp)
end
return(img_out)
else
return(img)
end
end
function rotate_img(img::BitArray{3},angle_val::Float64)
if angle_val!=0
img_out = copy(img)
for i = 1:size(img,3)
slice = img[:,:,i]
temp = imrotate(slice,angle_val,axes(slice))
replace_nan!(temp)
img_out[:,:,i] = temp.>0
end
return(img_out)
else
return(img)
end
end
function alldim(array::BitArray{2},dim::Int64)
vec = BitArray(undef, size(array,dim))
if dim==1
for i=1:length(vec)
vec[i] = all(array[i,:])
end
elseif dim==2
for i=1:length(vec)
vec[i] = all(array[:,i])
end
end
return vec
end
function conn(num::Int64)
if num==4
kernel = [false true false
true true true
false true false]
else
kernel = [true true true
true true true
true true true]
end
return kernel
end
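# conn(4) returns a plus-shaped kernel (4-connectivity); any other argument
# returns the full 3x3 kernel (8-connectivity). Typical use:
#   components = label_components(mask, conn(4))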
function component_intensity(components::Array{Int64},image::Array{Float32})
num = maximum(components)
intensities = Vector{Float32}(undef,num)
for i = 1:num
intensities[i] = mean(image[components.==i])
end
return intensities
end
function segment_objects(components::Array{Int64,2},objects::BitArray{2})
img_size = size(components)[1:2]
initial_indices = findall(components.!=0)
operations = [(0,1),(1,0),(0,-1),(-1,0),(1,-1),(-1,1),(-1,-1),(1,1)]
new_components = copy(components)
indices_out = initial_indices
while length(indices_out)!=0
indices_in = indices_out
indices_accum = Vector{Vector{CartesianIndex{2}}}(undef,0)
for i = 1:4
target = repeat([operations[i]],length(indices_in))
new_indices = broadcast((x,y) -> x .+ y,
Tuple.(indices_in),target)
objects_values = objects[indices_in]
target = repeat([(0,0)],length(new_indices))
nonzero_bool = broadcast((x,y) -> all(x .> y),
new_indices,target)
target = repeat([img_size],length(new_indices))
correct_size_bool = broadcast((x,y) -> all(x .<= y),
    new_indices,target) # <= keeps boundary pixels; target holds img_size
remove_incorrect = nonzero_bool .&
correct_size_bool .& objects_values
new_indices = new_indices[remove_incorrect]
values = new_components[CartesianIndex.(new_indices)]
new_indices_0_bool = values.==0
new_indices_0 = map(x-> CartesianIndex(x),
new_indices[new_indices_0_bool])
indices_prev = indices_in[remove_incorrect][new_indices_0_bool]
prev_values = new_components[CartesianIndex.(indices_prev)]
new_components[new_indices_0] .= prev_values
push!(indices_accum,new_indices_0)
end
indices_out = reduce(vcat,indices_accum)
end
return new_components
end
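# Usage sketch (hypothetical inputs): `components` holds labelled seed regions
# and `objects` the full foreground; unlabelled foreground pixels inherit the
# label of their nearest labelled 4-neighbour, flood-fill style:
#   seeds = label_components(markers, conn(4))
#   segmented = segment_objects(seeds, foreground)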
function allequal(itr::Union{Array,Tuple})
return length(itr)==0 || all( ==(itr[1]), itr)
end
const AbstractModel = Union{Flux.Chain}
function none(data)
return Float32[]
end
@with_kw mutable struct Normalization
f::Function = none
args::Tuple = ()
end
function sym_to_string(sym::Symbol)
return string(":",string(sym))
end
function msg_generator(value::Symbol,syms::NTuple{N,Symbol}) where N
local msg_end
msg_start = string(sym_to_string(value)," is not allowed. ")
msg_mid = "Value should be "
if N==1
msg_end = string(sym_to_string(syms[1]),".")
elseif N==2
msg_end = string(join(sym_to_string.(syms), " or "),".")
else
msg_end = sym_to_string(syms[1])
for i = 2:length(syms)-1
msg_end = string(msg_end,", ",sym_to_string(syms[i]))
end
msg_end = string(msg_end," or ",sym_to_string(syms[end]),".")
end
msg = string(msg_start,msg_mid,msg_end)
return msg
end
function check_setfield!(obj,k::Symbol,value::Vector{Symbol},syms::NTuple{N,Symbol}) where N
bools = map(x -> x in syms,value)
if all(bools)
setfield!(obj,k,value)
else
ind = findfirst((!).(bools))
msg = msg_generator(value[ind],syms)
throw(ArgumentError(msg))
end
return nothing
end
function check_setfield!(obj,k::Symbol,value::Symbol,syms::NTuple{N,Symbol}) where N
if value in syms
setfield!(obj,k,value)
else
msg = msg_generator(value,syms)
throw(ArgumentError(msg))
end
return nothing
end
function set_problem_type(ind)
ind = fix_QML_types(ind)
if ind==0
model_data.problem_type = :classification
elseif ind==1
model_data.problem_type = :regression
else # ind==2
model_data.problem_type = :segmentation
end
return nothing
end
function get_problem_type()
if problem_type()==:classification
return 0
elseif problem_type()==:regression
return 1
else # problem_type()==:segmentation
return 2
end
end
function get_input_type()
return 0
end
function load_regression_data(url::String)
ext_raw = split(url,".")[end]
ext = Unicode.normalize(ext_raw, casefold=true)
if ext=="csv"
labels_info = DataFrame(CSVFiles.load(url))
elseif ext=="xlsx"
labels_info = DataFrame(XLSX.readtable(url,1)...)
end
filenames_labels::Vector{String} = labels_info[:,1]
labels_original_T = map(ind->Vector(labels_info[ind,2:end]),1:size(labels_info,1))
loaded_labels::Vector{Vector{Float32}} = convert(Vector{Vector{Float32}},labels_original_T)
return filenames_labels,loaded_labels
end
function intersect_regression_data!(input_urls::Vector{String},filenames_inputs::Vector{String},
loaded_labels::Vector{Vector{Float32}},filenames_labels::Vector{String})
num = length(filenames_inputs)
inds_adj = zeros(Int64,num)
inds_remove = Vector{Int64}(undef,0)
cnt = 1
l = length(filenames_inputs)
while cnt<=l
filename = filenames_inputs[cnt]
ind = findfirst(x -> x==filename, filenames_labels)
if isnothing(ind)
deleteat!(input_urls,cnt)
deleteat!(filenames_inputs,cnt)
l-=1
else
inds_adj[cnt] = ind
cnt += 1
end
end
num = cnt - 1
inds_adj = inds_adj[1:num]
filenames_labels_temp = filenames_labels[inds_adj]
loaded_labels_temp = loaded_labels[inds_adj]
r = length(filenames_labels_temp)+1:length(filenames_labels)
deleteat!(filenames_labels,r)
deleteat!(loaded_labels,r)
filenames_labels .= filenames_labels_temp
loaded_labels .= loaded_labels_temp
return nothing
end
function intersect_inds(ar1,ar2)
inds1 = Array{Int64,1}(undef, 0)
inds2 = Array{Int64,1}(undef, 0)
for i=1:length(ar1)
inds_log = ar2.==ar1[i]
if any(inds_log)
push!(inds1,i)
push!(inds2,findfirst(inds_log))
end
end
return (inds1, inds2)
end
function remove_ext(files::Vector{String})
filenames = copy(files)
for i=1:length(files)
chars = collect(files[i])
ind = findlast(chars.=='.') # strip only the final extension, e.g. "a.b.png" -> "a.b"
filenames[i] = isnothing(ind) ? files[i] : String(chars[1:ind-1])
end
return filenames
end
# Get urls of files in selected folders. Requires data and labels
function get_urls2(url_inputs::String,label_url::String,allowed_ext::Vector{String})
# Get a reference to url accumulators
input_urls = Vector{Vector{String}}(undef,0)
label_urls = Vector{Vector{String}}(undef,0)
filenames = Vector{Vector{String}}(undef,0)
fileindices = Vector{Vector{Int64}}(undef,0)
# Empty url accumulators
empty!(input_urls)
empty!(label_urls)
# Return if empty
if isempty(url_inputs) || isempty(label_url)
@error "Empty urls."
return nothing,nothing,nothing
end
# Get directories containing our images and labels
dirs_input= getdirs(url_inputs)
dirs_labels = getdirs(label_url)
# Keep only those present for both images and labels
dirs = intersect(dirs_input,dirs_labels)
# If no directories, then set empty string
if length(dirs)==0
dirs = [""]
end
# Collect urls
cnt = 0
for k = 1:length(dirs)
input_urls_temp = Vector{String}(undef,0)
label_urls_temp = Vector{String}(undef,0)
# Get files in a directory
files_input = getfiles(string(url_inputs,"/",dirs[k]))
files_labels = getfiles(string(label_url,"/",dirs[k]))
# Filter files
files_input = filter_ext(files_input,allowed_ext)
files_labels = filter_ext(files_labels,allowed_ext)
# Remove extensions from files
filenames_input = remove_ext(files_input)
filenames_labels = remove_ext(files_labels)
# Intersect file names
inds1, inds2 = intersect_inds(filenames_labels, filenames_input)
# Keep files present for both images and labels
files_input = files_input[inds2]
files_labels = files_labels[inds1]
# Push urls into accumulators
num = length(files_input)
for l = 1:num
push!(input_urls_temp,string(url_inputs,"/",files_input[l]))
push!(label_urls_temp,string(label_url,"/",files_labels[l]))
end
push!(filenames,filenames_input[inds2])
push!(fileindices,cnt+1:cnt+num)
push!(input_urls,input_urls_temp)
push!(label_urls,label_urls_temp)
cnt += num # accumulate across directories so the index ranges do not overlap
end
return input_urls,label_urls,dirs,filenames,fileindices
end
# Convert images to BitArray{3}
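# Sketch of the mapping (hypothetical colors): for a label image where class i
# is drawn in labels_color[i] (0-255 RGB), channel i of the result marks that
# class, together with any child classes listed via labels_incl; trailing
# channels hold dilated borders for the classes with border output enabled.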
function label_to_bool(labelimg::Array{RGB{Normed{UInt8,8}},2}, class_inds::Vector{Int64},
labels_color::Vector{Vector{Float64}},labels_incl::Vector{Vector{Int64}},
border::Vector{Bool},border_num::Vector{Int64})
colors = map(x->RGB((n0f8.(./(x,255)))...),labels_color)
num = length(class_inds)
num_borders = sum(border)
inds_borders = findall(border)
label = fill!(BitArray{3}(undef, size(labelimg)...,
num + num_borders),0)
# Find classes based on colors
for i in class_inds
colors_current = [colors[i]]
inds = findall(map(x->issubset(i,x),labels_incl))
if !isempty(inds)
push!(colors_current,colors[inds]...)
end
bitarrays = map(x -> .==(labelimg,x)[:,:,:],colors_current)
label[:,:,i] = any(cat(bitarrays...,dims=Val(3)),dims=3)
end
# Make classes outlining object borders
for j=1:length(inds_borders)
ind = inds_borders[j]
border = outer_perim(label[:,:,ind])
dilate!(border,border_num[ind])
label[:,:,num+j] = border
end
return label
end
num_threads() = options.GlobalOptions.HardwareResources.num_threads
# Get urls of files in selected folders. Requires only data
function get_urls1(url_inputs::String,allowed_ext::Vector{String})
# Get a reference to url accumulators
input_urls = Vector{Vector{String}}(undef,0)
filenames = Vector{Vector{String}}(undef,0)
# Empty a url accumulator
empty!(input_urls)
# Return if empty
if isempty(url_inputs)
@warn "Directory is empty."
return nothing
end
# Get directories containing our images and labels
dirs = getdirs(url_inputs)
# If no directories, then set empty string
if length(dirs)==0
dirs = [""]
end
# Collect urls
for k = 1:length(dirs)
input_urls_temp = Vector{String}(undef,0)
dir = dirs[k]
# Get files in a directory
files_input = getfiles(string(url_inputs,"/",dir))
files_input = filter_ext(files_input,allowed_ext)
# Push urls into an accumulator
for l = 1:length(files_input)
push!(input_urls_temp,string(url_inputs,"/",dir,"/",files_input[l]))
end
push!(filenames,files_input)
push!(input_urls,input_urls_temp)
end
if dirs==[""]
url_split = split(url_inputs,('/','\\'))
dirs = [url_split[end]]
end
return input_urls,dirs,filenames
end
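# Example (hypothetical layout): for "data/images" containing subfolders
# "good" and "bad",
#   input_urls, dirs, filenames = get_urls1("data/images", ["png","jpg"])
# returns one url vector per subfolder; with no subfolders, the folder name
# itself is used as the single directory entry.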
function getdirs(dir)
return filter(x -> isdir(joinpath(dir, x)),readdir(dir))
end
function getfiles(dir)
return filter(x -> !isdir(joinpath(dir, x)),
readdir(dir))
end
function filter_ext(urls::Vector{String},allowed_ext::Vector{String})
urls_split = split.(urls,'.')
ext = map(x->string(x[end]),urls_split)
ext = lowercase.(ext)
log_inds = map(x->x in allowed_ext,ext)
urls_out = urls[log_inds]
return urls_out
end
# Convert images to grayscale Array{Float32,2}
function image_to_gray_float(image::Array{RGB{Normed{UInt8,8}},2})
img_temp = channelview(float.(Gray.(image)))
return collect(reshape(img_temp,size(img_temp)...,1))
end
# Convert images to RGB Array{Float32,3}
function image_to_color_float(image::Array{RGB{Normed{UInt8,8}},2})
img_temp = permutedims(channelview(float.(image)),[2,3,1])
return collect(img_temp)
end
function load_image(url::String)
img::Array{RGB{N0f8},2} = load(url)
return img
end
#---Accuracy-------------------------------------------
function accuracy_classification(predicted::A,actual::A) where {T<:Float32,A<:AbstractArray{T,2}}
acc = Vector{Float32}(undef,0)
for i in 1:size(predicted,2)
_ , actual_ind = collect(findmax(actual[:,i]))
_ , predicted_ind = collect(findmax(predicted[:,i]))
if actual_ind==predicted_ind
push!(acc,1)
else
push!(acc,0)
end
end
return mean(acc)
end
function accuracy_classification_weighted(predicted::A,actual::A,ws::Vector{T}) where {T<:Float32,A<:AbstractArray{T,2}}
l = size(predicted,2)
acc = Vector{Float32}(undef,l)
w = Vector{Float32}(undef,l)
for i = 1:l
_ , actual_ind = collect(findmax(actual[:,i]))
_ , predicted_ind = collect(findmax(predicted[:,i]))
w[i] = ws[actual_ind]
if actual_ind==predicted_ind
acc[i] = 1
else
acc[i] = 0
end
end
return mean(acc,StatsBase.weights(w))
end
function accuracy_regression(predicted::A,actual::A) where {T<:Float32,A<:AbstractArray{T}}
err = abs.(actual .- predicted)
err_relative = mean(err./actual)
acc = 1/(1+err_relative)
return acc
end
function accuracy_segmentation(predicted::A,actual::A) where {T<:Float32,A<:AbstractArray{T}}
actual_bool = actual.>0
predicted_bool = predicted.>0.5
# Calculate correct and incorrect class pixels as a BitArray
correct_bool = predicted_bool .& actual_bool
dif_bool = xor.(predicted_bool,actual_bool)
# Calculate class accuracies
sum_correct = convert(Float32,sum(correct_bool))
sum_dif = convert(Float32,sum(dif_bool))
acc = sum_correct./(sum_correct.+sum_dif)
if isnan(acc)
return 0f0
end
return acc
end
function calculate_sum(something_bool::AbstractArray{Bool,4})
sum_int_dim4 = collect(sum(something_bool, dims = [1,2,4]))
sum_int = sum_int_dim4[:]
return sum_int
end
# Weight accuracy using inverse frequency
function accuracy_segmentation_weighted(predicted::A,actual::A,ws::Vector{T}) where {T<:Float32,A<:AbstractArray{T}}
actual_bool = actual.>0
predicted_bool = predicted.>0.5
# Calculate correct and incorrect class pixels as a BitArray
correct_bool = predicted_bool .& actual_bool
dif_bool = xor.(predicted_bool,actual_bool)
# Calculate class accuracies
sum_correct_int = calculate_sum(correct_bool)
sum_dif_int = calculate_sum(dif_bool)
sum_correct = convert(Vector{Float32},sum_correct_int)
sum_dif = convert(Vector{Float32},sum_dif_int)
classes_accuracy = sum_correct./(sum_correct.+sum_dif)
acc = sum(ws.*classes_accuracy)
if isnan(acc)
return 0f0
end
return acc
end
# Returns an accuracy function
function get_accuracy_func(weights::Vector{Float32},some_options)
weight = some_options.Accuracy.weight_accuracy
if problem_type()==:classification
if weight
return (x,y) -> accuracy_classification_weighted(x,y,weights)
else
return accuracy_classification
end
elseif problem_type()==:regression
return accuracy_regression
elseif problem_type()==:segmentation
if weight
return (x,y) -> accuracy_segmentation_weighted(x,y,weights)
else
return accuracy_segmentation
end
end
end
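# Sketch (hypothetical weights): for weighted segmentation accuracy,
#   ws = Float32[0.7, 0.2, 0.1] # e.g. inverse class frequencies
#   acc = get_accuracy_func(ws, training_options)
#   acc(predicted, actual)      # Float32 in [0, 1]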
add_dim(x::Array{T, N}) where {T,N} = reshape(x, Val(N+1))
#--- Applying a neural network
# Getting a slice and its information
function prepare_input_data(input_data::Union{Array{Float32,4},CuArray{Float32,4}},ind_max::Int64,
max_value::Int64,offset::Int64,num_slices::Int64,ind_split::Int64,j::Int64)
start_ind = 1 + (j-1)*ind_split
if j==num_slices
end_ind = max_value
else
end_ind = start_ind + ind_split-1
end
correct_size = end_ind-start_ind+1
start_ind = start_ind - offset
start_ind = start_ind<1 ? 1 : start_ind
end_ind = end_ind + offset
end_ind = end_ind>max_value ? max_value : end_ind
temp_data = input_data[:,start_ind:end_ind,:,:]
max_dim_size = size(temp_data,ind_max)
offset_add = Int64(ceil(max_dim_size/16)*16) - max_dim_size
temp_data = pad(temp_data,(0,offset_add),same)
output_data = (temp_data,correct_size,offset_add)
return output_data
end
# Makes output mask to have a correct size for stiching
function fix_size(temp_predicted::Union{Array{Float32,4},CuArray{Float32,4}},
num_slices::Int64,correct_size::Int64,ind_max::Int64,
offset_add::Int64,j::Int64)
temp_size = size(temp_predicted,ind_max)
offset_temp = (temp_size - correct_size) - offset_add
if offset_temp>0
div_result = offset_add/2
offset_add1 = Int64(floor(div_result))
offset_add2 = Int64(ceil(div_result))
if j==1
temp_predicted = temp_predicted[:,
(1+offset_add1):(end-offset_temp-offset_add2),:,:]
elseif j==num_slices
temp_predicted = temp_predicted[:,
(1+offset_temp+offset_add1):(end-offset_add2),:,:]
else
temp = (temp_size - correct_size - offset_add)/2
offset_temp = Int64(floor(temp))
offset_temp2 = Int64(ceil(temp))
temp_predicted = temp_predicted[:,
(1+offset_temp+offset_add1):(end-offset_temp2-offset_add2),:,:]
end
elseif offset_temp<0
throw(DomainError("offset_temp should be greater or equal to zero"))
    end
    return temp_predicted
end
# Accumulates and stitches slices
function accum_slices(model::Chain,input_data::T,
num_slices::Int64,offset::Int64) where T<:AbstractArray{Float32,4}
input_size = size(input_data)
max_value = maximum(input_size)
ind_max = 2
ind_split = convert(Int64,floor(max_value/num_slices))
predicted = Vector{T}(undef,0)
for j = 1:num_slices
temp_data,correct_size,offset_add =
prepare_input_data(input_data,ind_max,max_value,offset,num_slices,ind_split,j)
temp_predicted = model(temp_data)
temp_predicted = fix_size(temp_predicted,num_slices,correct_size,ind_max,offset_add,j)
push!(predicted,temp_predicted)
end
predicted_out = reduce(hcat,predicted)
return predicted_out
end
"""
forward(model::Chain, input_data::Array{Float32}; num_slices::Int64=1, offset::Int64=20, use_GPU::Bool=false)
The function takes in a model and input data and returns the output of that model. `num_slices` specifies into how many
slices the input array should be split before being run through the neural network. This allows processing images that would
otherwise cause an out-of-memory error. `offset` specifies the size of the overlap taken from the left and right side of
each slice so that no seam appears in the stitched output. `use_GPU` enables or disables GPU usage.
"""
function forward(model::Chain,input_data::Array{Float32};
num_slices::Int64=1,offset::Int64=20,use_GPU::Bool=false)
if use_GPU
input_data_gpu = CuArray(input_data)
model = gpu(model)
if num_slices==1
predicted = collect(model(input_data_gpu))
else
predicted = collect(accum_slices(model,input_data_gpu,num_slices,offset))
end
else
if num_slices==1
predicted = model(input_data)
else
predicted = accum_slices(model,input_data,num_slices,offset)
end
end
return predicted
end
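# Usage sketch for `forward` (assumes `model` is a trained Flux chain and
# `img_data` is a 4D Float32 array in (height,width,channel,batch) layout):
#=
predicted = forward(model,img_data;num_slices=4,offset=20,use_GPU=true)
=#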
"""
apply_border_data(input_data::BitArray{3},classes::Vector{ImageSegmentationClass})
Used for segmentation. Uses the borders of objects that a neural network detected in order
to separate objects from each other. The output of a neural network should be
converted to a `BitArray` before being passed to this function.
"""
function apply_border_data(input_data::BitArray{3},classes::Vector{ImageSegmentationClass})
class_inds,_,_,border,border_thickness = get_class_data(classes)
inds_border = findall(border)
    if isempty(inds_border)
return input_data
end
num_border = length(inds_border)
num_classes = length(class_inds)
data = BitArray{3}(undef,size(input_data)[1:2]...,num_border)
for i = 1:num_border
border_num_pixels = border_thickness[i]
ind_classes = inds_border[i]
ind_border = num_classes + ind_classes
data_classes_bool = input_data[:,:,ind_classes]
data_classes = convert(Array{Float32},data_classes_bool)
data_border = input_data[:,:,ind_border]
border_bool = data_border
background1 = erode(data_classes_bool .& border_bool,border_num_pixels)
background2 = outer_perim(border_bool)
background2[data_classes_bool] .= false
background2 = dilate(background2,border_num_pixels+1)
background = background1 .| background2
skel = thinning(border_bool)
background[skel] .= true
if classes[i].BorderClass.enabled
components = label_components((!).(border_bool),conn(4))
intensities = component_intensity(components,data_classes)
bad_components = findall(intensities.<0.7)
            for j = 1:length(bad_components)
                components[components.==bad_components[j]] .= 0
            end
objects = data_classes.!=0
objects[skel] .= false
segmented = segment_objects(components,objects)
borders = mapwindow(x->!allequal(x), segmented, (3,3))
segmented[borders] .= 0
data[:,:,ind_classes] = segmented.>0
else
data_classes_bool[background] .= false
data[:,:,i] = data_classes_bool
end
end
return data
end
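# Usage sketch (hypothetical thresholded network output; `model_data.classes` is
# assumed to hold `ImageSegmentationClass` objects with border classes enabled):
#=
predicted_bool = predicted[:,:,:,1] .> 0.5f0 # BitArray{3}
separated = apply_border_data(predicted_bool,model_data.classes)
=#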
#---Padding
same(vect::Array{T},row::Int64,col::Int64) where T = repeat(vect,row,col)
function repeatCUDA(vect::CuArray,row::Int64,col::Int64)
if row!=1
array = reduce(vcat,repeat([vect],row,col))
else
array = reduce(hcat,repeat([vect],row,col))
end
return array
end
same(vect::CuArray{T},row::Int64,col::Int64) where T = repeatCUDA(vect,row,col)
function pad(array::A,padding::NTuple{2,Int64},fun::typeof(same)) where A<:AbstractArray{<:AbstractFloat, 4}
div_result = padding./2
leftpad = Int64.(floor.(div_result))
rightpad = Int64.(ceil.(div_result))
if padding[1]!=0
accum = Vector{A}(undef,0)
for i=1:size(array,3)
temp_array = array[:,:,i,:,:]
vec1 = permutedims(temp_array[1,:,:,:,:],(2,1,3,4))
vec2 = permutedims(temp_array[end,:,:,:,:],(2,1,3,4))
            temp_array = vcat(fun(vec1,leftpad[1],1),temp_array,fun(vec2,rightpad[1],1))
push!(accum,temp_array)
end
array = cat(accum...,dims=Val(3))
end
if padding[2]!=0
accum = Vector{A}(undef,0)
for i=1:size(array,3)
temp_array = array[:,:,i,:,:]
vec1 = temp_array[:,1,:,:,:]
vec2 = temp_array[:,end,:,:,:]
temp_array = hcat(fun(vec1,1,leftpad[2]),temp_array,fun(vec2,1,rightpad[2]))
push!(accum,temp_array)
end
array = cat(accum...,dims=Val(3))
end
return array
end
function pad(array::A,padding::NTuple{2,Int64},
fun::Union{typeof(zeros),typeof(ones)}) where {T<:AbstractFloat,A<:AbstractArray{T, 4}}
div_result = padding./2
leftpad = Int64.(floor.(div_result))
rightpad = Int64.(ceil.(div_result))
if padding[1]!=0
accum = Vector{A}(undef,0)
for i=1:size(array,3)
temp_array = array[:,:,i,:,:]
s_ar2 = size(temp_array,2)
s1 = (leftpad[1],s_ar2,1,1)
s2 = (rightpad[1],s_ar2,1,1)
output_array = vcat(fun(T,s1),temp_array,fun(T,s2))
push!(accum,output_array)
end
array = cat(accum...,dims=Val(3))
end
if padding[2]!=0
accum = Vector{A}(undef,0)
for i=1:size(array,3)
temp_array = array[:,:,i,:,:]
s_ar1 = size(temp_array,1)
s1 = (s_ar1,leftpad[2],1,1)
s2 = (s_ar1,rightpad[2],1,1)
output_array = hcat(fun(T,s1),temp_array,fun(T,s2))
push!(accum,output_array)
end
array = cat(accum...,dims=Val(3))
end
return array
end
function pad(array::A,padding::NTuple{2,Int64},fun::typeof(same)) where A<:Matrix
div_result = padding./2
leftpad = Int64.(floor.(div_result))
rightpad = Int64.(ceil.(div_result))
if padding[1]!=0
vec1 = collect(array[1,:]')
vec2 = collect(array[end,:]')
array = vcat(fun(vec1,leftpad[1],1),array,fun(vec2,rightpad[1],1))
end
if padding[2]!=0
vec1 = array[:,1]
vec2 = array[:,end]
array = hcat(fun(vec1,1,leftpad[2]),array,fun(vec2,1,rightpad[2]))
end
return array
end
function pad(array::A,padding::NTuple{2,Int64},
fun::Union{typeof(zeros),typeof(ones)}) where {T,A<:Matrix{T}}
div_result = padding./2
leftpad = Int64.(floor.(div_result))
rightpad = Int64.(ceil.(div_result))
if padding[1]!=0
s_ar2 = size(array,2)
s1 = (leftpad[1],s_ar2)
s2 = (rightpad[1],s_ar2)
array = vcat(fun(T,s1),array,fun(T,s2))
end
if padding[2]!=0
s_ar1 = size(array,1)
s1 = (s_ar1,leftpad[2])
s2 = (s_ar1,rightpad[2])
array = hcat(fun(T,s1),array,fun(T,s2))
end
return array
end
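# A short sketch of the padding helpers above (`same` replicates edge values,
# `zeros`/`ones` insert constant rows or columns):
#=
A = Float32[1 2; 3 4]
pad(A,(2,0),same) # replicated row added on top and bottom, size (4,2)
pad(A,(0,2),zeros) # zero column added on each side, size (2,4)
=#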
function fix_image_size(model_data::ModelData,img::Matrix)
im_size = size(img)
ratio_needed = model_data.input_size[1]/model_data.input_size[2]
pad_num = convert(Int64,round(im_size[1]/ratio_needed - im_size[2]))
if pad_num>0
img = pad(img,(0,pad_num),zeros)
else
img = pad(img,(-pad_num,0),zeros)
end
img = imresize(img,model_data.input_size[1:2])
return img
end | EasyML | https://github.com/OML-NPA/EasyML.jl.git |
|
[
"MIT"
] | 0.2.0 | baa418970a2eb1b5e82efcc75573343e78cf6e20 | code | 2810 |
module Application
using Parameters
import ..check_setfield!
#---Model data----------------------------------------------------------
abstract type AbstractOutputOptions end
mutable struct ImageClassificationOutputOptions<:AbstractOutputOptions
end
mutable struct ImageRegressionOutputOptions<:AbstractOutputOptions
end
@with_kw mutable struct OutputMask
mask::Bool = false
mask_border::Bool = false
mask_applied_border::Bool = false
end
@with_kw mutable struct OutputArea
area_distribution::Bool = false
obj_area::Bool = false
obj_area_sum::Bool = false
binning::Symbol = :auto
value::Float64 = 10
normalization::Symbol = :none
end
@with_kw mutable struct OutputVolume
volume_distribution::Bool = false
obj_volume::Bool = false
obj_volume_sum::Bool = false
binning::Symbol = :auto
value::Float64 = 10
normalization::Symbol = :none
end
@with_kw mutable struct ImageSegmentationOutputOptions<:AbstractOutputOptions
Mask::OutputMask = OutputMask()
Area::OutputArea = OutputArea()
Volume::OutputVolume = OutputVolume()
end
function Base.setproperty!(obj::Union{OutputArea,OutputVolume},k::Symbol,value::Symbol)
if k==:binning
syms = (:auto,:number_of_bins,:bin_width)
check_setfield!(obj,k,value,syms)
elseif k==:normalization
syms = (:none,:probability,:density,:pdf)
check_setfield!(obj,k,value,syms)
else
setfield!(obj,k,value)
end
return nothing
end
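# A minimal sketch of the validated assignment above (symbols outside the
# allowed sets are assumed to be rejected by `check_setfield!`):
#=
area = OutputArea()
area.binning = :number_of_bins # accepted
area.normalization = :pdf # accepted
area.binning = :unknown # rejected by check_setfield!
=#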
#---Data----------------------------------------------------------------
@with_kw mutable struct ApplicationData
input_urls::Vector{Vector{String}} = Vector{Vector{String}}(undef,0)
folders::Vector{String} = Vector{String}(undef,0)
url_inputs::String = ""
tasks::Vector{Task} = Vector{Task}(undef,0)
end
application_data = ApplicationData()
#---Options----------------------------------------------------------------
@with_kw mutable struct ApplicationOptions
savepath::String = ""
apply_by::Symbol = :file
data_type::Symbol = :csv
image_type::Symbol = :png
scaling::Float64 = 1
end
application_options = ApplicationOptions()
function Base.setproperty!(obj::ApplicationOptions,k::Symbol,value::Symbol)
if k==:apply_by
syms = (:file,:folder)
check_setfield!(obj,k,value,syms)
elseif k==:data_type
syms = (:csv,:xlsx,:json,:bson)
check_setfield!(obj,k,value,syms)
    elseif k==:image_type
        syms = (:png,:tiff,:json,:bson)
        check_setfield!(obj,k,value,syms)
    else
        setfield!(obj,k,value)
    end
return nothing
end
#---Export all--------------------------------------------------------------
for n in names(@__MODULE__; all=true)
if Base.isidentifier(n) && n ∉ (Symbol(@__MODULE__), :eval, :include)
@eval export $n
end
end
end | EasyML | https://github.com/OML-NPA/EasyML.jl.git |
|
[
"MIT"
] | 0.2.0 | baa418970a2eb1b5e82efcc75573343e78cf6e20 | code | 1047 |
module Classes
using Parameters
#---Data----------------------------------------------------------------
abstract type AbstractClass end
@with_kw mutable struct ImageClassificationClass<:AbstractClass
name::String = ""
weight::Float32 = 1
end
@with_kw mutable struct ImageRegressionClass<:AbstractClass
name::String = ""
end
@with_kw mutable struct BorderClass
enabled::Bool = false
thickness::Int64 = 3
end
@with_kw mutable struct ImageSegmentationClass<:AbstractClass
name::String = ""
weight::Float32 = 1
color::Vector{Float64} = Vector{Float64}(undef,3)
parents::Vector{String} = ["",""]
overlap::Bool = false
min_area::Int64 = 0
BorderClass::BorderClass = BorderClass()
end
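# A sketch of constructing classes with the keyword constructors above
# (field values are illustrative only):
#=
cell = ImageSegmentationClass(name = "Cell", weight = 1.0f0,
    color = [255.0,0.0,0.0], min_area = 20,
    BorderClass = BorderClass(enabled = true, thickness = 5))
category = ImageClassificationClass(name = "Dog", weight = 1.0f0)
=#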
#---Options----------------------------------------------------------------
#---Export all--------------------------------------------------------------
for n in names(@__MODULE__; all=true)
if Base.isidentifier(n) && n ∉ (Symbol(@__MODULE__), :eval, :include)
@eval export $n
end
end
end | EasyML | https://github.com/OML-NPA/EasyML.jl.git |
|
[
"MIT"
] | 0.2.0 | baa418970a2eb1b5e82efcc75573343e78cf6e20 | code | 3457 |
module DataPreparation
using Parameters
#---Data----------------------------------------------------------------
@with_kw mutable struct ClassificationUrlsData
input_urls::Vector{Vector{String}} = Vector{Vector{String}}(undef,0)
label_urls::Vector{String} = Vector{String}(undef,0)
filenames::Vector{Vector{String}} = Vector{Vector{String}}(undef,0)
end
@with_kw mutable struct ClassificationResultsData
data_input::Union{Vector{T1},Vector{T2}} where {T1<:Array{Float32,1},T2<:Array{Float32,3}} = Vector{Array{Float32,3}}(undef,0)
data_labels::Vector{Int32} = Vector{Int32}(undef,0)
end
@with_kw mutable struct ClassificationData
Urls::ClassificationUrlsData = ClassificationUrlsData()
Data::ClassificationResultsData = ClassificationResultsData()
end
@with_kw mutable struct RegressionUrlsData
initial_data_labels::Vector{Vector{Float32}} = Vector{Vector{Float32}}(undef,0)
input_urls::Vector{String} = Vector{String}(undef,0)
labels_url::String = ""
end
@with_kw mutable struct RegressionResultsData{T1<:Array{Float32,1},T2<:Array{Float32,3}}
data_input::Union{Vector{T1},Vector{T2}} = Vector{Array{Float32,3}}(undef,0)
data_labels::Union{Vector{T1},Vector{T2}} = Vector{Vector{Float32}}(undef,0)
end
@with_kw mutable struct RegressionData
Urls::RegressionUrlsData = RegressionUrlsData()
Data::RegressionResultsData = RegressionResultsData()
end
@with_kw mutable struct SegmentationUrlsData
input_urls::Vector{String} = Vector{String}(undef,0)
label_urls::Vector{String} = Vector{String}(undef,0)
foldernames::Vector{String} = Vector{String}(undef,0)
end
@with_kw mutable struct SegmentationResultsData
data_input::Vector{Array{Float32,3}} = Vector{Array{Float32,3}}(undef,0)
data_labels::Vector{BitArray{3}} = Vector{BitArray{3}}(undef,0)
end
@with_kw mutable struct SegmentationData
Urls::SegmentationUrlsData = SegmentationUrlsData()
Data::SegmentationResultsData = SegmentationResultsData()
end
@with_kw mutable struct PreparationUrls
url_inputs::String = ""
url_labels::String = ""
end
preparation_urls = PreparationUrls()
@with_kw mutable struct PreparationData
ClassificationData::ClassificationData = ClassificationData()
RegressionData::RegressionData = RegressionData()
SegmentationData::SegmentationData = SegmentationData()
Urls::PreparationUrls = preparation_urls
tasks::Vector{Task} = Vector{Task}(undef,0)
end
preparation_data = PreparationData()
#---Options----------------------------------------------------------------
@with_kw mutable struct BackgroundCroppingOptions
enabled::Bool = false
threshold::Float64 = 0.3
closing_value::Int64 = 1
end
background_cropping_options = BackgroundCroppingOptions()
@with_kw mutable struct ImagePreparationOptions
grayscale::Bool = false
mirroring::Bool = false
num_angles::Int64 = 1
min_fr_pix::Float64 = 0.0
BackgroundCropping::BackgroundCroppingOptions = background_cropping_options
end
image_preparation_options = ImagePreparationOptions()
@with_kw struct DataPreparationOptions
Images::ImagePreparationOptions = image_preparation_options
end
data_preparation_options = DataPreparationOptions()
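# A short sketch of adjusting the options above before data preparation
# (illustrative values; augmentation uses mirroring and four rotation angles):
#=
data_preparation_options.Images.mirroring = true
data_preparation_options.Images.num_angles = 4
data_preparation_options.Images.BackgroundCropping.enabled = true
=#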
#---Export all--------------------------------------------------------------
for n in names(@__MODULE__; all=true)
if Base.isidentifier(n) && n ∉ (Symbol(@__MODULE__), :eval, :include)
@eval export $n
end
end
end | EasyML | https://github.com/OML-NPA/EasyML.jl.git |
|
[
"MIT"
] | 0.2.0 | baa418970a2eb1b5e82efcc75573343e78cf6e20 | code | 8516 |
module Design
using Parameters
import ..Normalization, ..none
#---Model data----------------------------------------------------------------
module Layers
using Parameters
abstract type AbstractLayerInfo end
@with_kw mutable struct GenericInfo<:AbstractLayerInfo
id::Int64 = 0
name::String = ""
type::String = ""
group::String = ""
connections_up::Vector{Int64} = Vector{Int64}(undef,0)
connections_down::Vector{Vector{Int64}} = Vector{Vector{Int64}}(undef,0)
x::Float64 = 0
y::Float64 = 0
label_color::NTuple{3,Int64} = (0,0,0)
end
@with_kw mutable struct InputInfo<:AbstractLayerInfo
id::Int64 = 0
name::String = ""
type::String = ""
group::String = ""
connections_up::Vector{Int64} = Vector{Int64}(undef,0)
connections_down::Vector{Vector{Int64}} = Vector{Vector{Int64}}(undef,0)
x::Float64 = 0
y::Float64 = 0
label_color::NTuple{3,Int64} = (0,0,0)
size::NTuple{3,Int64} = (0,0,0)
normalization::Int64 = 0
end
@with_kw mutable struct OutputInfo<:AbstractLayerInfo
id::Int64 = 0
name::String = ""
type::String = ""
group::String = ""
connections_up::Vector{Int64} = Vector{Int64}(undef,0)
connections_down::Vector{Vector{Int64}} = Vector{Vector{Int64}}(undef,0)
x::Float64 = 0
y::Float64 = 0
label_color::NTuple{3,Int64} = (0,0,0)
loss::Int64 = 0
end
@with_kw mutable struct ConvInfo<:AbstractLayerInfo
id::Int64 = 0
name::String = ""
type::String = ""
group::String = ""
connections_up::Vector{Int64} = Vector{Int64}(undef,0)
connections_down::Vector{Vector{Int64}} = Vector{Vector{Int64}}(undef,0)
x::Float64 = 0
y::Float64 = 0
label_color::NTuple{3,Int64} = (0,0,0)
filters::Int64 = 0
filter_size::NTuple{2,Int64} = (0,0)
stride::Int64 = 0
dilation_factor::Int64 = 0
end
@with_kw mutable struct TConvInfo<:AbstractLayerInfo
id::Int64 = 0
name::String = ""
type::String = ""
group::String = ""
connections_up::Vector{Int64} = Vector{Int64}(undef,0)
connections_down::Vector{Vector{Int64}} = Vector{Vector{Int64}}(undef,0)
x::Float64 = 0
y::Float64 = 0
label_color::NTuple{3,Int64} = (0,0,0)
filters::Int64 = 0
filter_size::NTuple{2,Int64} = (0,0)
stride::Int64 = 0
dilation_factor::Int64 = 0
end
@with_kw mutable struct DenseInfo<:AbstractLayerInfo
id::Int64 = 0
name::String = ""
type::String = ""
group::String = ""
connections_up::Vector{Int64} = Vector{Int64}(undef,0)
connections_down::Vector{Vector{Int64}} = Vector{Vector{Int64}}(undef,0)
x::Float64 = 0
y::Float64 = 0
label_color::NTuple{3,Int64} = (0,0,0)
filters::Int64 = 0
end
@with_kw mutable struct BatchNormInfo<:AbstractLayerInfo
id::Int64 = 0
name::String = ""
type::String = ""
group::String = ""
connections_up::Vector{Int64} = Vector{Int64}(undef,0)
connections_down::Vector{Vector{Int64}} = Vector{Vector{Int64}}(undef,0)
x::Float64 = 0
y::Float64 = 0
label_color::NTuple{3,Int64} = (0,0,0)
epsilon::Float64 = 0
end
@with_kw mutable struct DropoutInfo<:AbstractLayerInfo
id::Int64 = 0
name::String = ""
type::String = ""
group::String = ""
connections_up::Vector{Int64} = Vector{Int64}(undef,0)
connections_down::Vector{Vector{Int64}} = Vector{Vector{Int64}}(undef,0)
x::Float64 = 0
y::Float64 = 0
label_color::NTuple{3,Int64} = (0,0,0)
probability::Float64 = 0
end
@with_kw mutable struct LeakyReLUInfo<:AbstractLayerInfo
id::Int64 = 0
name::String = ""
type::String = ""
group::String = ""
connections_up::Vector{Int64} = Vector{Int64}(undef,0)
connections_down::Vector{Vector{Int64}} = Vector{Vector{Int64}}(undef,0)
x::Float64 = 0
y::Float64 = 0
label_color::NTuple{3,Int64} = (0,0,0)
scale::Float64 = 0
end
@with_kw mutable struct ELUInfo<:AbstractLayerInfo
id::Int64 = 0
name::String = ""
type::String = ""
group::String = ""
connections_up::Vector{Int64} = Vector{Int64}(undef,0)
connections_down::Vector{Vector{Int64}} = Vector{Vector{Int64}}(undef,0)
x::Float64 = 0
y::Float64 = 0
label_color::NTuple{3,Int64} = (0,0,0)
alpha::Float64 = 0
end
@with_kw mutable struct PoolInfo<:AbstractLayerInfo
id::Int64 = 0
name::String = ""
type::String = ""
group::String = ""
connections_up::Vector{Int64} = Vector{Int64}(undef,0)
connections_down::Vector{Vector{Int64}} = Vector{Vector{Int64}}(undef,0)
x::Float64 = 0
y::Float64 = 0
label_color::NTuple{3,Int64} = (0,0,0)
poolsize::NTuple{2,Int64} = (0,0)
stride::Int64 = 0
end
@with_kw mutable struct AdditionInfo<:AbstractLayerInfo
id::Int64 = 0
name::String = ""
type::String = ""
group::String = ""
connections_up::Vector{Int64} = Vector{Int64}(undef,0)
connections_down::Vector{Vector{Int64}} = Vector{Vector{Int64}}(undef,0)
x::Float64 = 0
y::Float64 = 0
label_color::NTuple{3,Int64} = (0,0,0)
inputs::Int64 = 0
end
@with_kw mutable struct JoinInfo<:AbstractLayerInfo
id::Int64 = 0
name::String = ""
type::String = ""
group::String = ""
connections_up::Vector{Int64} = Vector{Int64}(undef,0)
connections_down::Vector{Vector{Int64}} = Vector{Vector{Int64}}(undef,0)
x::Float64 = 0
y::Float64 = 0
label_color::NTuple{3,Int64} = (0,0,0)
inputs::Int64 = 0
dimension::Int64 = 0
end
@with_kw mutable struct SplitInfo<:AbstractLayerInfo
id::Int64 = 0
name::String = ""
type::String = ""
group::String = ""
connections_up::Vector{Int64} = Vector{Int64}(undef,0)
connections_down::Vector{Vector{Int64}} = Vector{Vector{Int64}}(undef,0)
x::Float64 = 0
y::Float64 = 0
label_color::NTuple{3,Int64} = (0,0,0)
outputs::Int64 = 0
dimension::Int64 = 0
end
@with_kw mutable struct UpsampleInfo<:AbstractLayerInfo
id::Int64 = 0
name::String = ""
type::String = ""
group::String = ""
connections_up::Vector{Int64} = Vector{Int64}(undef,0)
connections_down::Vector{Vector{Int64}} = Vector{Vector{Int64}}(undef,0)
x::Float64 = 0
y::Float64 = 0
label_color::NTuple{3,Int64} = (0,0,0)
multiplier::Int64 = 0
dimensions::Vector{Int64} = [0]
end
#---Export all--------------------------------------------------------------
for n in names(@__MODULE__; all=true)
if Base.isidentifier(n) && n ∉ (Symbol(@__MODULE__), :eval, :include)
@eval export $n
end
end
end
#---Data----------------------------------------------------------------
using Flux, .Layers
@with_kw mutable struct DesignModelData
model::Flux.Chain = Flux.Chain()
normalization::Normalization = Normalization(none,())
loss::Function = Flux.Losses.mse
input_size::NTuple{3,Int64} = (0,0,0)
output_size::NTuple{3,Int64} = (0,0,0)
problem_type::Symbol = :classification
input_type::Symbol = :image
layers_info::Vector{AbstractLayerInfo} = Vector{AbstractLayerInfo}(undef,0)
end
@with_kw mutable struct DesignData
ModelData::DesignModelData = DesignModelData()
warnings::Vector{String} = Vector{String}(undef,0)
end
design_data = DesignData()
#---Options----------------------------------------------------------------
@with_kw mutable struct DesignOptions
width::Float64 = 340
height::Float64 = 100
min_dist_x::Float64 = 80
min_dist_y::Float64 = 40
end
design_options = DesignOptions()
#---Export all--------------------------------------------------------------
for n in names(@__MODULE__; all=true)
if Base.isidentifier(n) && n ∉ (Symbol(@__MODULE__), :eval, :include)
@eval export $n
end
end
end | EasyML | https://github.com/OML-NPA/EasyML.jl.git |
|
[
"MIT"
] | 0.2.0 | baa418970a2eb1b5e82efcc75573343e78cf6e20 | code | 3398 |
module Training
using Parameters, Dates
import ..DataPreparation: ClassificationData, RegressionData, SegmentationData
#---Data----------------------------------------------------------------
@with_kw mutable struct TrainingPlotData
iteration::Int64 = 0
epoch::Int64 = 0
iterations_per_epoch::Int64 = 0
starting_time::DateTime = now()
max_iterations::Int64 = 0
learning_rate_changed::Bool = false
end
training_plot_data = TrainingPlotData()
@with_kw mutable struct TrainingResultsData
accuracy::Vector{Float32} = Vector{Float32}(undef,0)
loss::Vector{Float32} = Vector{Float32}(undef,0)
test_accuracy::Vector{Float32} = Vector{Float32}(undef,0)
test_loss::Vector{Float32} = Vector{Float32}(undef,0)
test_iteration::Vector{Int64} = Vector{Int64}(undef,0)
end
training_results_data = TrainingResultsData()
@with_kw mutable struct TrainingOptionsData
optimiser_params::Vector{Vector{Float64}} = [[],[0.9],[0.9],[0.9],[0.9,0.999],
[0.9,0.999],[0.9,0.999],[],[0.9],[0.9,0.999],[0.9,0.999],[0.9,0.999,0]]
optimiser_params_names::Vector{Vector{String}} = [[],["ρ"],["ρ"],["ρ"],["β1","β2"],
["β1","β2"],["β1","β2"],[],["ρ"],["β1","β2"],["β1","β2"],["β1","β2","Weight decay"]]
allow_lr_change::Bool = true
run_test::Bool = false
end
training_options_data = TrainingOptionsData()
@with_kw mutable struct TrainingData
PlotData::TrainingPlotData = training_plot_data
Results::TrainingResultsData = training_results_data
ClassificationData::ClassificationData = ClassificationData()
RegressionData::RegressionData = RegressionData()
SegmentationData::SegmentationData = SegmentationData()
OptionsData::TrainingOptionsData = training_options_data
weights::Vector{Float32} = Vector{Float32}(undef,0)
tasks::Vector{Task} = Vector{Task}(undef,0)
warnings::Vector{String} = Vector{String}(undef,0)
errors::Vector{String} = Vector{String}(undef,0)
end
training_data = TrainingData()
@with_kw mutable struct TestingData
ClassificationData::ClassificationData = ClassificationData()
RegressionData::RegressionData = RegressionData()
SegmentationData::SegmentationData = SegmentationData()
end
testing_data = TestingData()
#---Options----------------------------------------------------------------
@with_kw mutable struct AccuracyOptions
weight_accuracy::Bool = true
accuracy_mode::Symbol = :auto
end
accuracy_options = AccuracyOptions()
@with_kw mutable struct TestingOptions
data_preparation_mode::Symbol = :auto
test_data_fraction::Float64 = 0.1
num_tests::Float64 = 2
end
testing_options = TestingOptions()
@with_kw mutable struct HyperparametersOptions
optimiser::Symbol = :ADAM
optimiser_params::Vector{Float64} = [0.9,0.999]
learning_rate::Float64 = 1e-3
epochs::Int64 = 1
batch_size::Int64 = 10
end
hyperparameters_options = HyperparametersOptions()
@with_kw mutable struct TrainingOptions
Accuracy::AccuracyOptions = accuracy_options
Testing::TestingOptions = testing_options
Hyperparameters::HyperparametersOptions = hyperparameters_options
end
training_options = TrainingOptions()
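# A short sketch of configuring training via the options above (illustrative values):
#=
training_options.Hyperparameters.optimiser = :ADAM
training_options.Hyperparameters.learning_rate = 5e-4
training_options.Hyperparameters.epochs = 10
training_options.Hyperparameters.batch_size = 32
training_options.Testing.test_data_fraction = 0.2
=#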
#---Export all--------------------------------------------------------------
for n in names(@__MODULE__; all=true)
if Base.isidentifier(n) && n ∉ (Symbol(@__MODULE__), :eval, :include)
@eval export $n
end
end
end | EasyML | https://github.com/OML-NPA/EasyML.jl.git |
|
[
"MIT"
] | 0.2.0 | baa418970a2eb1b5e82efcc75573343e78cf6e20 | code | 3614 |
module Validation
using Parameters, ColorTypes, FixedPointNumbers
#---Data----------------------------------------------------------------
@with_kw mutable struct ValidationImageClassificationResults
original_images::Vector{Array{RGB{N0f8},2}} = Vector{Array{RGB{N0f8},2}}(undef,0)
predicted_labels::Vector{String} = Vector{String}(undef,0)
target_labels::Vector{String} = Vector{String}(undef,0)
accuracy::Vector{Float32} = Vector{Float32}(undef,0)
loss::Vector{Float32} = Vector{Float32}(undef,0)
end
validation_image_classification_results = ValidationImageClassificationResults()
@with_kw mutable struct ValidationImageRegressionResults
original_images::Vector{Array{RGB{N0f8},2}} = Vector{Array{RGB{N0f8},2}}(undef,0)
predicted_labels::Vector{Vector{Float32}}= Vector{Vector{Float32}}(undef,0)
target_labels::Vector{Vector{Float32}} = Vector{Vector{Float32}}(undef,0)
accuracy::Vector{Float32} = Vector{Float32}(undef,0)
loss::Vector{Float32} = Vector{Float32}(undef,0)
end
validation_image_regression_results = ValidationImageRegressionResults()
@with_kw mutable struct ValidationImageSegmentationResults
original_images::Vector{Array{RGB{N0f8},2}} = Vector{Array{RGB{N0f8},2}}(undef,0)
predicted_data::Vector{Vector{Tuple{BitArray{2},Vector{N0f8}}}} =
Vector{Vector{Tuple{BitArray{2},Vector{N0f8}}}}(undef,0)
target_data::Vector{Vector{Tuple{BitArray{2},Vector{N0f8}}}} =
Vector{Vector{Tuple{BitArray{2},Vector{N0f8}}}}(undef,0)
error_data::Vector{Vector{Tuple{BitArray{3},Vector{N0f8}}}} =
Vector{Vector{Tuple{BitArray{3},Vector{N0f8}}}}(undef,0)
accuracy::Vector{Float32} = Vector{Float32}(undef,0)
loss::Vector{Float32} = Vector{Float32}(undef,0)
end
validation_image_segmentation_results = ValidationImageSegmentationResults()
@with_kw mutable struct ValidationUrls
input_urls::Vector{String} = Vector{String}(undef,0)
label_urls::Vector{String} = Vector{String}(undef,0)
labels_classification::Vector{Int32} = Vector{Int32}(undef,0)
    labels_regression::Vector{Vector{Float32}} = Vector{Vector{Float32}}(undef,0)
url_inputs::String = ""
url_labels::String = ""
end
validation_urls = ValidationUrls()
@with_kw mutable struct ValidationPlotData
original_image::Array{RGB{N0f8},2} = Array{RGB{N0f8},2}(undef,0,0)
label_image::Array{RGB{N0f8},2} = Array{RGB{N0f8},2}(undef,0,0)
use_labels::Bool = false
end
validation_plot_data = ValidationPlotData()
@with_kw struct ValidationData
PlotData::ValidationPlotData = validation_plot_data
ImageClassificationResults::ValidationImageClassificationResults = validation_image_classification_results
ImageRegressionResults::ValidationImageRegressionResults = validation_image_regression_results
ImageSegmentationResults::ValidationImageSegmentationResults = validation_image_segmentation_results
Urls::ValidationUrls = validation_urls
tasks::Vector{Task} = Vector{Task}(undef,0)
end
validation_data = ValidationData()
#---Options----------------------------------------------------------------
@with_kw mutable struct AccuracyOptions
weight_accuracy::Bool = true
accuracy_mode::Symbol = :auto
end
accuracy_options = AccuracyOptions()
@with_kw mutable struct ValidationOptions
Accuracy::AccuracyOptions = accuracy_options
end
validation_options = ValidationOptions()
#---Export all--------------------------------------------------------------
for n in names(@__MODULE__; all=true)
if Base.isidentifier(n) && n ∉ (Symbol(@__MODULE__), :eval, :include)
@eval export $n
end
end
end | EasyML | https://github.com/OML-NPA/EasyML.jl.git |
|
[
"MIT"
] | 0.2.0 | baa418970a2eb1b5e82efcc75573343e78cf6e20 | code | 876 |
module DataPreparation
# Import packages
using
# Data structuring
DataFrames,
# Data import/export
FileIO, ImageIO, XLSX, CSVFiles,
# Data manipulation
Unicode,
# Image manipulation
Images, ColorTypes, ImageFiltering, ImageTransformations,
ImageMorphology, DSP, ImageMorphology.FeatureTransform, ImageSegmentation,
# Math functions
StatsBase, Statistics, LinearAlgebra, Combinatorics,
# Other
ProgressMeter, FLoops,
# EasyML ecosystem
..Common, ..Common.Classes, ..Common.DataPreparation
import ..Classes
import ..Classes: change_classes
import ..Common.dilate!
# Include functions
include(string(common_dir(),"/common/preparation_validation.jl"))
include(string(common_dir(),"/common/preparation_validation_application.jl"))
include("main.jl")
include("exported_functions.jl")
export data_preparation_options, DataPreparationOptions
export get_urls, prepare_data
end
| EasyML | https://github.com/OML-NPA/EasyML.jl.git |
|
[
"MIT"
] | 0.2.0 | baa418970a2eb1b5e82efcc75573343e78cf6e20 | code | 8360 |
"""
change(data_preparation_options::DataPreparationOptions)
Opens a GUI for changing `data_preparation_options`.
"""
function Common.change(data::DataPreparationOptions)
@qmlfunction(
# Options
get_options,
set_options,
save_options,
# Model data
set_model_data,
get_model_data,
rm_model_data,
# Other
unit_test
)
path_qml = string(@__DIR__,"/gui/DataPreparationOptions.qml")
gui_dir = string("file:///",replace(@__DIR__, "\\" => "/"),"/gui/")
text = add_templates(path_qml)
loadqml(QByteArray(text), gui_dir = gui_dir)
exec()
return nothing
end
function get_urls(url_inputs::String,url_labels::String,preparation_data::PreparationData)
url_inputs = replace(url_inputs, '\\'=>'/')
url_labels = replace(url_labels, '\\'=>'/')
preparation_data.Urls.url_inputs = url_inputs
preparation_data.Urls.url_labels = url_labels
if !isdir(url_inputs)
@error string(url_inputs," does not exist.")
return nothing
end
if problem_type()==:classification || problem_type()==:segmentation
if !isdir(url_labels)
@error string(url_labels," does not exist.")
return nothing
end
elseif problem_type()==:regression
if !isfile(url_labels)
@error string(url_labels," does not exist.")
return nothing
end
end
urls = get_urls_main(model_data,preparation_data)
return urls
end
"""
get_urls(url_inputs::String,url_labels::String)
Gets URLs to all files present in both folders (or a folder and a file)
specified by `url_inputs` and `url_labels`. URLs are automatically saved to `EasyML.preparation_data`.
"""
get_urls(url_inputs,url_labels) = get_urls(url_inputs,url_labels,preparation_data)
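# Usage sketch (hypothetical paths; for classification `url_inputs` holds one
# subfolder per class, for segmentation the two directories mirror each other):
#=
get_urls("data/images","data/labels")
=#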
function get_urls(url_inputs::String,preparation_data::PreparationData)
if problem_type()!=:classification
@error "Label data directory URL was not given."
return nothing
end
url_inputs = replace(url_inputs, '\\'=>'/')
preparation_data.Urls.url_inputs = url_inputs
if !isdir(url_inputs)
@error string(url_inputs," does not exist.")
return nothing
end
urls = get_urls_main(model_data,preparation_data)
return urls
end
"""
get_urls(url_inputs::String)
Used for classification. Gets URLs to all files present in the folders located in the folder specified by `url_inputs`.
Folder names should be identical to the class names. URLs are automatically saved to `EasyML.preparation_data`.
"""
get_urls(url_inputs) = get_urls(url_inputs,preparation_data)
function get_urls(preparation_data::PreparationData)
dir = pwd()
@info "Select a directory with input data."
path = get_folder(dir)
if !isempty(path)
preparation_data.Urls.url_inputs = path
@info string(preparation_data.Urls.url_inputs, " was selected.")
else
@error "Input data directory URL is empty."
return nothing
end
if problem_type()==:classification
elseif problem_type()==:regression
@info "Select a file with label data."
name_filters = ["*.csv","*.xlsx"]
path = get_file(dir,name_filters)
if !isempty(path)
preparation_data.Urls.url_labels = path
@info string(preparation_data.Urls.url_labels, " was selected.")
else
@error "Label data file URL is empty."
return nothing
end
elseif problem_type()==:segmentation
@info "Select a directory with label data."
path = get_folder(dir)
if !isempty(path)
preparation_data.Urls.url_labels = path
@info string(preparation_data.Urls.url_labels, " was selected.")
else
@error "Label data directory URL is empty."
return nothing
end
end
urls = get_urls_main(model_data,preparation_data)
return urls
end
"""
get_urls()
Opens a folder/file dialog or dialogs to choose folders or folder and a file containing inputs
and labels. URLs are automatically saved to `EasyML.preparation_data`.
"""
get_urls() = get_urls(preparation_data)
function prepare_data(model_data::ModelData,preparation_data::PreparationData)
if isempty(model_data.classes)
@error "Classes are empty."
return nothing
end
fields = [:data_input,:data_labels]
for i in fields
empty!(getfield(preparation_data.ClassificationData.Data,i))
empty!(getfield(preparation_data.RegressionData.Data,i))
empty!(getfield(preparation_data.SegmentationData.Data,i))
end
empty_channel(:data_preparation_progress)
if input_type()==:image
if problem_type()==:classification
empty!(preparation_data.SegmentationData.Urls.input_urls)
empty!(preparation_data.SegmentationData.Urls.label_urls)
empty!(preparation_data.RegressionData.Urls.input_urls)
if isempty(preparation_data.ClassificationData.Urls.input_urls)
            @error "No input urls. Run 'get_urls'."
return nothing
end
elseif problem_type()==:regression
empty!(preparation_data.ClassificationData.Urls.input_urls)
empty!(preparation_data.ClassificationData.Urls.label_urls)
empty!(preparation_data.SegmentationData.Urls.input_urls)
empty!(preparation_data.SegmentationData.Urls.label_urls)
if isempty(preparation_data.RegressionData.Urls.input_urls)
            @error "No input urls. Run 'get_urls'."
return nothing
end
elseif problem_type()==:segmentation
empty!(preparation_data.ClassificationData.Urls.input_urls)
empty!(preparation_data.ClassificationData.Urls.label_urls)
empty!(preparation_data.RegressionData.Urls.input_urls)
if isempty(preparation_data.SegmentationData.Urls.input_urls)
            @error "No input urls. Run 'get_urls'."
return nothing
end
end
end
t = prepare_data_main(model_data,preparation_data,channels)
max_value = 0
value = 0
p = Progress(0)
while true
if max_value!=0
temp_value = get_progress(:data_preparation_progress)
if temp_value!=false
value += temp_value
# handle progress here
next!(p)
elseif value==max_value
break
else
state,error = check_task(t)
if state==:error
throw(error)
return nothing
end
sleep(0.1)
end
else
temp_value = get_progress(:data_preparation_progress)
if temp_value!=false
if temp_value!=0
max_value = temp_value
p.n = max_value
else
@error "No data to process."
break
end
else
state,error = check_task(t)
if state==:error
throw(error)
return nothing
end
sleep(0.1)
end
end
end
if problem_type()==:classification
return preparation_data.ClassificationData.Data
elseif problem_type()==:regression
return preparation_data.RegressionData.Data
else # problem_type()==:segmentation
return preparation_data.SegmentationData.Data
end
end
"""
prepare_data()
Prepares images and corresponding labels for training using URLs loaded previously by
`get_urls`. The prepared data is automatically saved to `EasyML.preparation_data`.
"""
prepare_data() = prepare_data(model_data,preparation_data)
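# Usage sketch of the full pipeline (assumes classes were defined beforehand):
#=
get_urls("data/images","data/labels")
results = prepare_data() # e.g. preparation_data.SegmentationData.Data
=#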
"""
remove_urls()
Removes previously loaded URLs for the current problem type.
"""
function remove_urls(preparation_data::PreparationData)
if problem_type()==:classification
preparation_data.ClassificationData.Urls = ClassificationUrlsData()
elseif problem_type()==:regression
preparation_data.RegressionData.Urls = RegressionUrlsData()
else # problem_type()==:segmentation
preparation_data.SegmentationData.Urls = SegmentationUrlsData()
end
end
remove_urls() = remove_urls(preparation_data)
| EasyML | https://github.com/OML-NPA/EasyML.jl.git |
|
[
"MIT"
] | 0.2.0 | baa418970a2eb1b5e82efcc75573343e78cf6e20 | code | 18929 |
#----------------------------------------------------------------
# Allows to write to data from GUI
function set_model_data_main(model_data::ModelData,field,value)
field_string::String = fix_QML_types(field)
value_string::String = fix_QML_types(value)
field = Symbol(field_string)
value = Symbol(value_string)
values = getproperty(model_data, field)
if !(value in values)
push!(values,value)
end
return nothing
end
set_model_data(field,values) = set_model_data_main(model_data,field,values)
function get_model_data_main(model_data::ModelData,field,value)
field_string = fix_QML_types(field)
value_string = fix_QML_types(value)
field = Symbol(field_string)
values_string = string.(getproperty(model_data, field))
if value_string in values_string
return true
else
return false
end
end
get_model_data(field,value) = get_model_data_main(model_data,field,value)
function rm_model_data_main(model_data::ModelData,field,value)
field_string = fix_QML_types(field)
value_string = fix_QML_types(value)
field = Symbol(field_string)
values = getproperty(model_data, field)
values_string = string.(values)
ind = findall(value_string.==values_string)
if !isempty(ind)
deleteat!(values,ind)
end
return nothing
end
rm_model_data(field,value) = rm_model_data_main(model_data,field,value)
#---get_urls functions------------------------------------------------------
function get_urls_main(model_data::ModelData,preparation_data::PreparationData)
if isempty(model_data.classes)
@error "Classes are empty."
return nothing
end
    remove_urls(preparation_data)
url_inputs = preparation_data.Urls.url_inputs
url_labels = preparation_data.Urls.url_labels
if input_type()==:image
allowed_ext = ["png","jpg","jpeg"]
end
if problem_type()==:classification
classification_data = preparation_data.ClassificationData
input_urls,dirs,_ = get_urls1(url_inputs,allowed_ext)
labels = map(class -> class.name,model_data.classes)
        intersection_bool = map(x-> x in labels,dirs)
        if sum(intersection_bool)!=length(dirs)
            inds_ignored = findall((!).(intersection_bool))
            for i in inds_ignored
                @warn string(dirs[i]," is not a name of one of the labels. The folder was ignored.")
            end
            inds_kept = findall(intersection_bool)
            dirs = dirs[inds_kept]
            input_urls = input_urls[inds_kept]
        end
if isempty(input_urls)
@warn "The folder did not have any suitable data."
else
classification_data.Urls.input_urls = input_urls
classification_data.Urls.label_urls = dirs
return classification_data.Urls
end
elseif problem_type()==:regression
regression_data = preparation_data.RegressionData
input_urls_raw,_,filenames_inputs_raw = get_urls1(url_inputs,allowed_ext)
input_urls = reduce(vcat,input_urls_raw)
filenames_inputs = reduce(vcat,filenames_inputs_raw)
filenames_labels,loaded_labels = load_regression_data(url_labels)
intersect_regression_data!(input_urls,filenames_inputs,loaded_labels,filenames_labels)
if isempty(input_urls)
@warn "The folder did not have any suitable data."
else
regression_data.Urls.input_urls = input_urls
regression_data.Urls.labels_url = url_labels
regression_data.Urls.initial_data_labels = loaded_labels
return regression_data.Urls
end
elseif problem_type()==:segmentation
segmentation_data = preparation_data.SegmentationData
input_urls_raw,label_urls_raw,_,_,_ = get_urls2(url_inputs,url_labels,allowed_ext)
input_urls = reduce(vcat,input_urls_raw)
label_urls = reduce(vcat,label_urls_raw)
if isempty(input_urls)
@warn "The folder did not have any suitable data."
else
segmentation_data.Urls.input_urls = input_urls
segmentation_data.Urls.label_urls = label_urls
return segmentation_data.Urls
end
end
return nothing
end
#---prepare_data functions-------------------------------------------------------------------
# Removes rows and columns from image sides if they are uniformly black.
function crop_background(img::Array{Float32,3},label::BitArray{3},
threshold::Float64,closing_value::Int64)
img_temp = mean(img,dims=3)[:,:]
field = imfilter(img_temp.<threshold, Kernel.gaussian(4)).>0.5
field = closing!(field,closing_value)
row_bool = (!).(alldim(field,1))
col_bool = (!).(alldim(field,2))
col1 = findfirst(col_bool)
col2 = findlast(col_bool)
row1 = findfirst(row_bool)
row2 = findlast(row_bool)
col1 = isnothing(col1) ? 1 : col1
col2 = isnothing(col2) ? size(img,1) : col2
row1 = isnothing(row1) ? 1 : row1
row2 = isnothing(row2) ? size(img,2) : row2
img = img[row1:row2,col1:col2,:]
label = label[row1:row2,col1:col2,:]
return img,label
end
function load_images(urls::Vector{String},channel::Channel)
num = length(urls)
imgs = Vector{Array{RGB{N0f8},2}}(undef,num)
for i = 1:num
imgs[i] = load_image(urls[i])
put!(channel,1)
end
return imgs
end
# Returns color for labels, whether should be combined with other
# labels and whether border data should be obtained
function get_class_data(classes::Vector{ImageSegmentationClass})
num = length(classes)
class_names = Vector{String}(undef,num)
class_parents = Vector{Vector{String}}(undef,num)
labels_color = Vector{Vector{Float64}}(undef,num)
labels_incl = Vector{Vector{Int64}}(undef,num)
for i=1:num
class = classes[i]
class_names[i] = classes[i].name
class_parents[i] = classes[i].parents
labels_color[i] = class.color
end
for i=1:num
labels_incl[i] = findall(any.(map(x->x.==class_parents[i],class_names)))
end
class_inds = Vector{Int64}(undef,0)
for i = 1:num
if !classes[i].overlap
push!(class_inds,i)
end
end
num = length(class_inds)
border = Vector{Bool}(undef,num)
border_thickness = Vector{Int64}(undef,num)
    for (j,i) in enumerate(class_inds)
        class = classes[i]
        border[j] = class.BorderClass.enabled
        border_thickness[j] = class.BorderClass.thickness
    end
return class_inds,labels_color,labels_incl,border,border_thickness
end
# Augments images and labels using rotation and mirroring
function augment(float_img::Array{Float32,3},size12::Tuple{Int64,Int64},
num_angles::Int64,mirroring_inds::Vector{Int64})
data = Vector{Array{Float32,3}}(undef,0)
angles_range = range(0,stop=2*pi,length=num_angles+1)
angles = collect(angles_range[1:end-1])
num = length(angles)
for g = 1:num
angle_val = angles[g]
img2 = rotate_img(float_img,angle_val)
size1_adj = size12[1]*0.9
size2_adj = size12[2]*0.9
num1 = Int64(floor(size(img2,1)/size1_adj))
num2 = Int64(floor(size(img2,2)/size2_adj))
step1 = Int64(floor(size1_adj/num1))
step2 = Int64(floor(size2_adj/num2))
num1 = max(num1-1,1)
num2 = max(num2-1,1)
for i = 1:num1
for j = 1:num2
ymin = (i-1)*step1+1
xmin = (j-1)*step2+1
I1 = img2[ymin:ymin+size12[1]-1,xmin:xmin+size12[2]-1,:]
if std(I1)<0.01
continue
else
for h in mirroring_inds
if h==1
I1_out = I1
else
I1_out = reverse(I1, dims = 2)
end
data_out = I1_out
push!(data,data_out)
end
end
end
end
end
return data
end
# Augments images and labels using rotation and mirroring
function augment(float_img::Array{Float32,3},label::BitArray{3},size12::Tuple{Int64,Int64},
num_angles::Int64,min_fr_pix::Float64,mirroring_inds::Vector{Int64})
data = Vector{Tuple{Array{Float32,3},BitArray{3}}}(undef,0)
lim = prod(size12)*min_fr_pix
angles_range = range(0,stop=2*pi,length=num_angles+1)
angles = collect(angles_range[1:end-1])
num = length(angles)
for g = 1:num
angle_val = angles[g]
img2 = rotate_img(float_img,angle_val)
label2 = rotate_img(label,angle_val)
size1_adj = size12[1]*0.9
size2_adj = size12[2]*0.9
num1 = Int64(floor(size(img2,1)/size1_adj))
num1 = max(num1,1)
num2 = Int64(floor(size(img2,2)/size2_adj))
num2 = max(num2,1)
step1 = Int64(floor(size(img2,1)/num1))
step2 = Int64(floor(size(img2,2)/num2))
num1 = max(num1-1,1)
num2 = max(num2-1,1)
for i in 1:num1
for j in 1:num2
ymin = (i-1)*step1+1
xmin = (j-1)*step2+1
I1 = img2[ymin:ymin+size12[1]-1,xmin:xmin+size12[2]-1,:]
I2 = label2[ymin:ymin+size12[1]-1,xmin:xmin+size12[2]-1,:]
if std(I1)<0.01 || sum(I2)<lim
continue
else
for h in mirroring_inds
if h==1
I1_out = I1
I2_out = I2
elseif h==2
I1_out = reverse(I1, dims = 2)
I2_out = reverse(I2, dims = 2)
end
data_out = (I1_out,I2_out)
push!(data,data_out)
end
end
end
end
end
return data
end
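# A sketch of calling the augmentation above (hypothetical 160x160 crops, four
# rotation angles with mirroring; `img` and `label` are assumed to be loaded):
#=
pairs = augment(img,label,(160,160),4,0.1,[1,2])
length(pairs) # number of (input,label) crops passing the std/area checks
=#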
function apply_normalization(model_data::ModelData,data::Vector{Array{Float32,N}}) where N
model_data.normalization.args = (model_data.normalization.f(data)...,)
end
function prepare_data(model_data::ModelData,classification_data::ClassificationData,
size12::Tuple{Int64,Int64},data_preparation_options::DataPreparationOptions,
progress::Channel)
num_angles = data_preparation_options.Images.num_angles
mirroring_inds = Vector{Int64}(undef,0)
if data_preparation_options.Images.mirroring
append!(mirroring_inds,[1,2])
else
push!(mirroring_inds,1)
end
input_urls = classification_data.Urls.input_urls
label_urls = classification_data.Urls.label_urls
labels = map(class -> class.name, model_data.classes)
data_labels_initial = map((label,l) -> repeat([findfirst(label.==labels)],l),label_urls,length.(input_urls))
num = length(input_urls)
# Get number of images
num_all = sum(length.(input_urls))
# Return progress target value
put!(progress, 2*num_all + 1)
# Load images
imgs = map(x -> load_images(x,progress),input_urls)
# Initialize accumulators
data_input = Vector{Vector{Array{Float32,3}}}(undef,num)
data_label = Vector{Vector{Int32}}(undef,num)
chunk_size = convert(Int64,round(num/num_threads()))
@floop ThreadedEx(basesize = chunk_size) for k = 1:num
current_imgs = imgs[k]
num2 = length(current_imgs)
label = data_labels_initial[k]
data_input_temp = Vector{Vector{Array{Float32,3}}}(undef,num2)
data_label_temp = Vector{Vector{Int32}}(undef,num2)
for l = 1:num2
# Abort if requested
#if check_abort_signal(channels.data_preparation_modifiers)
# return nothing
#end
# Get a current image
img_raw = current_imgs[l]
# Convert to float
if :grayscale in model_data.input_properties
img = image_to_gray_float(img_raw)
else
img = image_to_color_float(img_raw)
end
# Augment images
data = augment(img,size12,num_angles,mirroring_inds)
data_input_temp[l] = data
data_label_temp[l] = repeat([label[l]],length(data))
# Return progress
put!(progress, 1)
end
data_input_flat_temp = reduce(vcat,data_input_temp)
data_label_flat_temp = reduce(vcat,data_label_temp)
data_input[k] = data_input_flat_temp
data_label[k] = data_label_flat_temp
end
# Flatten input images and labels array
data_input_flat = reduce(vcat,data_input)
data_label_flat = reduce(vcat,data_label)
# Normalize
apply_normalization(model_data,data_input_flat)
# Return results
classification_data.Data.data_input = data_input_flat
classification_data.Data.data_labels = data_label_flat
# Return progress
put!(progress, 1)
return nothing
end
function prepare_data(model_data::ModelData,regression_data::RegressionData,
size12::Tuple{Int64,Int64},data_preparation_options::DataPreparationOptions,
progress::Channel)
input_size = model_data.input_size
num_angles = data_preparation_options.Images.num_angles
mirroring_inds = Vector{Int64}(undef,0)
if data_preparation_options.Images.mirroring
append!(mirroring_inds,[1,2])
else
push!(mirroring_inds,1)
end
input_urls = regression_data.Urls.input_urls
initial_label_data = copy(regression_data.Urls.initial_data_labels)
# Get number of images
num = length(input_urls)
# Return progress target value
put!(progress, 2*num+1)
num = length(input_urls)
# Load images
imgs = load_images(input_urls,progress)
# Initialize accumulators
data_input = Vector{Vector{Array{Float32,3}}}(undef,num)
data_label = Vector{Vector{Vector{Float32}}}(undef,num)
chunk_size = convert(Int64,round(num/num_threads()))
@floop ThreadedEx(basesize = chunk_size) for k = 1:num
# Abort if requested
#if check_abort_signal(channels.data_preparation_modifiers)
# return nothing
#end
# Get a current image
img_raw = imgs[k]
img_raw = imresize(img_raw,input_size[1:2])
# Get current label
label = initial_label_data[k]
# Convert to float
if :grayscale in model_data.input_properties
img = image_to_gray_float(img_raw)
else
img = image_to_color_float(img_raw)
end
# Augment images
temp_input = augment(img,size12,num_angles,mirroring_inds)
temp_label = repeat([label],length(temp_input))
data_input[k] = temp_input
data_label[k] = temp_label
# Return progress
put!(progress, 1)
end
# Flatten input images and labels array
data_input_flat = reduce(vcat,data_input)
data_label_flat = reduce(vcat,data_label)
# Normalize
apply_normalization(model_data,data_input_flat)
# Return results
regression_data.Data.data_input = data_input_flat
regression_data.Data.data_labels = data_label_flat
# Return progress
put!(progress, 1)
return nothing
end
function prepare_data(model_data::ModelData,segmentation_data::SegmentationData,
size12::Tuple{Int64,Int64},data_preparation_options::DataPreparationOptions,
progress::Channel)
classes = model_data.classes
min_fr_pix = data_preparation_options.Images.min_fr_pix
num_angles = data_preparation_options.Images.num_angles
background_cropping = data_preparation_options.Images.BackgroundCropping
mirroring_inds = Vector{Int64}(undef,0)
if data_preparation_options.Images.mirroring
append!(mirroring_inds,[1,2])
else
push!(mirroring_inds,1)
end
input_urls = segmentation_data.Urls.input_urls
label_urls = segmentation_data.Urls.label_urls
# Get number of images
num = length(input_urls)
# Return progress target value
put!(progress, 3*num+1)
# Get class data
class_inds,labels_color,labels_incl,border,border_thickness = get_class_data(classes)
border_num = (border_thickness.-1).÷2
# Load images
imgs = load_images(input_urls,progress)
labels = load_images(label_urls,progress)
# Initialize accumulators
data_input = Vector{Vector{Array{Float32,3}}}(undef,num)
data_label = Vector{Vector{Array{Float32,3}}}(undef,num)
# Make input images
chunk_size = convert(Int64,round(num/num_threads()))
@floop ThreadedEx(basesize = chunk_size) for k = 1:num
# Abort if requested
#if check_abort_signal(channels.data_preparation_modifiers)
# return nothing
#end
# Get current images
img_raw = imgs[k]
labelimg = labels[k]
# Convert to float
if :grayscale in model_data.input_properties
img = image_to_gray_float(img_raw)
else
img = image_to_color_float(img_raw)
end
# Convert an image to BitArray
label = label_to_bool(labelimg,class_inds,labels_color,labels_incl,border,border_num)
# Crop to remove black background
if background_cropping.enabled
threshold = background_cropping.threshold
closing_value = background_cropping.closing_value
img,label = crop_background(img,label,threshold,closing_value)
end
# Augment images
data = augment(img,label,size12,num_angles,min_fr_pix,mirroring_inds)
data_input[k] = getfield.(data, 1)
data_label[k] = getfield.(data, 2)
# Return progress
put!(progress, 1)
end
# Flatten input images and labels array
data_input_flat = reduce(vcat,data_input)
data_label_flat = reduce(vcat,data_label)
# Normalize
apply_normalization(model_data,data_input_flat)
# Return results
segmentation_data.Data.data_input = data_input_flat
segmentation_data.Data.data_labels = data_label_flat
# Return progress
put!(progress, 1)
return nothing
end
function prepare_data_main(model_data::ModelData,
preparation_data::PreparationData,channels::Channels)
# Initialize
data_preparation_options = options.DataPreparationOptions
size12 = model_data.input_size[1:2]
if problem_type()==:classification
data = preparation_data.ClassificationData
elseif problem_type()==:regression
data = preparation_data.RegressionData
else # problem_type()==:segmentation
data = preparation_data.SegmentationData
end
progress = channels.data_preparation_progress
t = Threads.@spawn prepare_data(model_data,data,size12,data_preparation_options,progress)
push!(preparation_data.tasks,t)
return t
end | EasyML | https://github.com/OML-NPA/EasyML.jl.git |
|
[
"MIT"
] | 0.2.0 | baa418970a2eb1b5e82efcc75573343e78cf6e20 | code | 402 |
module Design
# Import packages
using
# Machine Learning
Flux, Flux.Losses, FluxExtra, FluxExtra.Normalizations,
# Math functions
Statistics,
# EasyML ecosystem
..Common, ..Common.Design, ..Common.Layers
import Flux.outputsize, ..Common.none
# Include functions
include(string(common_dir(),"/common/classes_design.jl"))
include("main.jl")
include("exported_functions.jl")
export design_model
end
| EasyML | https://github.com/OML-NPA/EasyML.jl.git |
|
[
"MIT"
] | 0.2.0 | baa418970a2eb1b5e82efcc75573343e78cf6e20 | code | 938 |
# Design
"""
design_model()
Opens a GUI for creation of a model.
"""
function design_model()
# Launches GUI
@qmlfunction(
# Handle classes
model_count,
model_get_layer_property,
model_properties,
# Model functions
get_problem_type,
set_problem_type,
get_max_id,
reset_layers,
update_layers,
make_model,
check_model,
move_model,
save_model,
load_model,
# Model design
arrange,
# Data handling
get_data,
set_data,
get_options,
set_options,
save_options,
# Unit testing
unit_test
)
path_qml = string(@__DIR__,"/gui/Design.qml")
gui_dir = string("file:///",replace(@__DIR__, "\\" => "/"),"/gui/")
text = add_templates(path_qml)
loadqml(QByteArray(text), gui_dir = gui_dir)
exec()
return nothing
end
| EasyML | https://github.com/OML-NPA/EasyML.jl.git |
|
[
"MIT"
] | 0.2.0 | baa418970a2eb1b5e82efcc75573343e78cf6e20 | code | 24863 |
#---Set up----------------------------------------------------------------------
model_count() = length(model_data.layers_info)
function get_max_id_main(model_data::ModelData)
if length(model_data.layers_info)>0
ids = map(x-> x.id, model_data.layers_info)
return maximum(ids)
else
return 0
end
end
get_max_id() = get_max_id_main(model_data)
model_properties(index) = string.(collect(fieldnames(typeof(model_data.layers_info[Int64(index)]))))
function model_get_layer_property_main(model_data::ModelData,index,property_name)
layer_info = model_data.layers_info[Int64(index)]
property = getfield(layer_info,Symbol(property_name))
if typeof(property) <: Tuple
return collect(property)
end
return property
end
model_get_layer_property(index,property_name) =
model_get_layer_property_main(model_data,index,property_name)
function reset_layers_main(design_data::DesignData)
empty!(design_data.ModelData.layers_info)
return nothing
end
reset_layers() = reset_layers_main(design_data::DesignData)
#---Model import form QML----------------------------------------------------------------
function get_layer_info(layer_name::String)
if layer_name=="Input"
return InputInfo()
elseif layer_name=="Output"
return OutputInfo()
elseif layer_name=="Convolution"
return ConvInfo()
elseif layer_name=="Transposed convolution"
return TConvInfo()
elseif layer_name=="Dense"
return DenseInfo()
elseif layer_name=="Drop-out"
return DropoutInfo()
elseif layer_name=="Batch normalization"
return BatchNormInfo()
elseif layer_name=="Leaky ReLU"
return LeakyReLUInfo()
elseif layer_name=="ELU"
return ELUInfo()
elseif layer_name=="Max pooling" || layer_name=="Average pooling"
return PoolInfo()
elseif layer_name=="Addition"
return AdditionInfo()
elseif layer_name=="Join"
return JoinInfo()
elseif layer_name=="Split"
return SplitInfo()
elseif layer_name=="Upsample"
return UpsampleInfo()
else
return GenericInfo()
end
end
function update_layers_main(design_data::DesignData,fields,values)
layers_info = design_data.ModelData.layers_info
fields = fix_QML_types(fields)
values = fix_QML_types(values)
layer_info = get_layer_info(values[8])
for i = 1:length(fields)
value_raw = values[i]
field = Symbol(fields[i])
type = typeof(getfield(layer_info,field))
if type <: Tuple
eltypes = type.parameters
l = length(eltypes)
if layer_info isa InputInfo
if length(value_raw)==2
push!(value_raw,1)
end
elseif length(eltypes)!=length(value_raw)
if value_raw isa Array
value_raw = repeat(value_raw,l)
else
value_raw = repeat([value_raw],l)
end
end
value_array = map((T,x)->convert(T,x),eltypes,value_raw)
value = Tuple(value_array)
elseif (type<:Number) && (value_raw isa String)
value = parse(type,value_raw)
else
value = convert(type,value_raw)
end
setproperty!(layer_info,field,value)
end
push!(layers_info,layer_info)
return nothing
end
update_layers(fields,values) = update_layers_main(design_data::DesignData,
fields,values)
#---Topology constructors----------------------------------------------------------------
function topology_linear(layers_arranged::Vector,inds_arranged::Vector,
layers::Vector{AbstractLayerInfo},connections::Vector{Array{Vector{Int64}}},
types::Vector{String},ind)
push!(layers_arranged,layers[ind])
push!(inds_arranged,ind)
ind = connections[ind]
return ind
end
function topology_split(layers_arranged::Vector,inds_arranged::Vector,
layers::Vector{AbstractLayerInfo},connections::Vector{Array{Vector{Int64}}},
connections_in::Vector{Vector{Int64}},types::Vector{String},ind)
types_all = map(i -> types[i],ind)
joining_types_bool = map(type -> all(type=="Join") || all(type=="Addition"),types_all)
num = length(ind)
par_inds = []
inds_return = []
par_layers_arranged = []
next_connections = connections[ind]
unique_connections = unique(next_connections)
if (length(unique_connections)!=length(ind)) && length(unique_connections)>1
ind_new = Vector{Vector}(undef, 0)
joining_types_bool_new = []
for con in unique_connections
bool_inds = map(next_con -> next_con==con,next_connections)
new_cons = ind[bool_inds]
push!(ind_new,new_cons)
push!(joining_types_bool_new,joining_types_bool[bool_inds][1])
end
ind = ind_new
joining_types_bool = joining_types_bool_new
num = length(ind_new)
end
for i = 1:num
layers_temp = []
inds_temp = []
if joining_types_bool[i] && !allcmp(next_connections)
push!(inds_return,ind[i])
push!(inds_temp,0)
else
if ind isa Vector{<:Vector}
ind_temp = [ind[i]]
else
ind_temp = [[ind[i]]]
end
ind_out = get_topology_branches(layers_temp,inds_temp,layers,
connections,connections_in,types,ind_temp)[1]
push!(inds_return,ind_out)
            type = types[inds_return[i][1]]
            if type != "Join" && type != "Addition"
                # This type of topology is not supported
                return
            end
end
push!(par_layers_arranged,layers_temp)
push!(par_inds,inds_temp)
end
push!(layers_arranged,par_layers_arranged)
push!(inds_arranged,par_inds)
return inds_return
end
function get_topology_branches(layers_arranged::Vector,inds_arranged::Vector,
layers::Vector{AbstractLayerInfo},connections::Vector{Array{Vector{Int64}}},
connections_in::Vector{Vector{Int64}},types::Vector{String},ind)
    while !isempty(ind)
numk = length(ind)
if numk==1
            if any(map(x -> x=="Join" || x=="Addition",types[vcat(vcat(ind...)...)]))
if length(ind)==1 && length(ind[1])>1
ind = topology_split(layers_arranged,inds_arranged,layers,
connections,connections_in,types,ind[1])
elseif length(ind)>1 && allcmp(ind[1])
ind_in_actual = connections_in[ind[1][1]]
ind_in_arranged = vcat(vcat(inds_arranged[end]...)...)
ind_0 = findfirst(ind_in_arranged.==0)
                    if !isnothing(ind_0)
ind_in_arranged[ind_0] = vcat(vcat(inds_arranged[end-1]...)...)
end
inds_to_use = map(x -> findfirst(x.==ind_in_actual),ind_in_arranged)
if inds_to_use!=1:length(inds_to_use)
layers_arranged[end] = layers_arranged[end][inds_to_use]
inds_arranged[end] = inds_arranged[end][inds_to_use]
end
ind = topology_linear(layers_arranged,inds_arranged,
layers,connections,types,ind[1][1])
else
return ind
end
else
if length(ind[1])==1
ind = topology_linear(layers_arranged,inds_arranged,
layers,connections,types,ind[1][1])
else
ind = topology_split(layers_arranged,inds_arranged,layers,
connections,connections_in,types,ind[1])
end
end
else
            if any(map(x -> x=="Join" || x=="Addition",types[vcat(vcat(ind...)...)]))
if length(ind)>1 && allcmp(ind)
ind_in_actual = connections_in[ind[1][1]]
ind_in_arranged_raw = inds_arranged[end]
ind_in_arranged = Vector{Int64}(undef,length(ind_in_actual))
for i = 1:length(ind_in_actual)
ind_current = ind_in_arranged_raw[i]
while true
if ind_current isa Vector
ind_current = ind_current[end]
else
break
end
end
ind_in_arranged[i] = ind_current
end
ind_0 = findfirst(ind_in_arranged.==0)
if !isnothing(ind_0)
ind_in_arranged[ind_0] = vcat(vcat(inds_arranged[end-1]...)...)[1]
end
inds_to_use = map(x -> findfirst(x.==ind_in_actual),ind_in_arranged)
if inds_to_use!=1:length(inds_to_use)
layers_arranged[end] = layers_arranged[end][inds_to_use]
inds_arranged[end] = inds_arranged[end][inds_to_use]
end
ind = topology_linear(layers_arranged,inds_arranged,
layers,connections,types,ind[1][1])
elseif all(length.(ind).==1)
ind = topology_split(layers_arranged,inds_arranged,layers,
connections,connections_in,types,vcat(ind...))
                else
                    # This type of topology is not supported
                    return
                end
else
if all(length.(ind).==1)
ind = topology_split(layers_arranged,inds_arranged,layers,
connections,connections_in,types,vcat(ind...))
else
return ind
end
end
end
end
return ind
end
function get_topology(model_data::DesignModelData)
    layers = model_data.layers_info
types = [layers[i].type for i = 1:length(layers)]
ind_vec = findall(types .== "Input")
if isempty(ind_vec)
msg = "No input layer."
@error msg
push!(design_data.warnings,msg)
return nothing,nothing
elseif length(ind_vec)>1
msg = "More than one input layer."
@error msg
push!(design_data.warnings,msg)
return nothing,nothing
end
connections = Vector{Array{Vector{Int64}}}(undef,0)
connections_in = Vector{Vector{Int64}}(undef,0)
for i = 1:length(layers)
push!(connections,layers[i].connections_down)
push!(connections_in,layers[i].connections_up)
end
ind = ind_vec[1]
layers_arranged = []
inds_arranged = []
push!(layers_arranged,layers[ind])
push!(inds_arranged,ind)
ind = connections[ind]
ind = get_topology_branches(layers_arranged,inds_arranged,layers,
connections,connections_in,types,ind)
if isempty(inds_arranged[end])
inds_arranged = inds_arranged[1:end-1]
end
return layers_arranged, inds_arranged
end
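# Hypothetical example: for a linear model Input -> Convolution -> Output added in
# that order, get_topology returns the three layer infos in layers_arranged and
# inds_arranged == [1, 2, 3].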
#---Model visual representation constructors-----------------------------------------------
function arrange_layer(coordinates::Array,coordinate::Array{Float64},
design_options::DesignOptions)
coordinate[2] = coordinate[2] + design_options.min_dist_y + design_options.height
push!(coordinates,coordinate)
return coordinate
end
function arrange_branches(coordinates,coordinate::Vector{Float64},
design_options::DesignOptions,layers)
num = layers isa AbstractLayerInfo ? 1 : length(layers)
if num==1
coordinate = arrange_layer(coordinates,copy(coordinate),design_options)
else
max_num = ones(Int64,num)
for i = 1:length(layers)
temp1 = layers[i]
for temp2 in temp1
if temp2 isa Vector
width = length(temp2)
if width>max_num[i]
max_num[i] = width
end
end
end
end
par_coordinates = []
x_coordinates = []
push!(x_coordinates,coordinate[1])
for i=2:num
prev_layer_right = x_coordinates[end] .+ max_num[i-1]*design_options.width .+ (max_num[i-1]-1)*design_options.min_dist_x
current_layer_left = prev_layer_right .+ (max_num[i]-1)*design_options.width .+ max_num[i]*design_options.min_dist_x
push!(x_coordinates,current_layer_left)
end
x_coordinates = x_coordinates .-
(mean([x_coordinates[1],x_coordinates[end]])-coordinate[1])
for i = 1:num
temp_coordinates = []
temp_coordinate = [x_coordinates[i],coordinate[2]]
if isempty(layers[i])
push!(temp_coordinates,[x_coordinates[i],coordinate[2]])
else
for j = 1:length(layers[i])
temp_coordinate = arrange_branches(temp_coordinates,temp_coordinate,
design_options,layers[i][j])
end
end
push!(par_coordinates,temp_coordinates)
end
push!(coordinates,copy(par_coordinates))
coordinate = [coordinate[1],
maximum(map(x-> x[end],map(x -> x[end],par_coordinates)))]
end
return coordinate
end
function get_values!(values::Array,array::Array,cond_fun)
for i=1:length(array)
temp = array[i]
if cond_fun(temp)
get_values!(values,temp,cond_fun)
else
push!(values,temp)
end
end
return nothing
end
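# Example: with values = [], get_values!(values, [1, [2, [3]]], x -> x isa Array)
# recurses into the nested arrays, leaving values == [1, 2, 3].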
function arrange_main(design_data::DesignData,design_options::DesignOptions)
layers_arranged,inds_arranged = get_topology(design_data.ModelData)
coordinates = []
coordinate = [layers_arranged[1].x,layers_arranged[1].y]
push!(coordinates,coordinate)
for i = 2:length(inds_arranged)
layers = layers_arranged[i]
coordinate = arrange_branches(coordinates,
coordinate,design_options,layers)
end
coordinates_flattened = []
get_values!(coordinates_flattened,coordinates,
x-> x isa Array && x[1] isa Array)
inds_flattened = []
get_values!(inds_flattened,inds_arranged,x-> x isa Array)
true_elements = inds_flattened.>0
coordinates_flattened = coordinates_flattened[true_elements]
inds_flattened = inds_flattened[true_elements]
return [coordinates_flattened,inds_flattened.-1]
end
arrange() = arrange_main(design_data,design_options)
#---Model constructors--------------------------------------------------------------------
function getlinear(type::String, layer_info, in_size::Tuple{Int64,Int64,Int64})
if type == "Convolution"
layer = Conv(
layer_info.filter_size,
in_size[3] => layer_info.filters,
pad = SamePad(),
stride = layer_info.stride,
dilation = layer_info.dilation_factor
)
out = outputsize(layer, (in_size...,1))[1:3]
return (layer, out)
elseif type == "Transposed convolution"
layer = ConvTranspose(
layer_info.filter_size,
in_size[3] => layer_info.filters,
pad = SamePad(),
stride = layer_info.stride,
dilation = layer_info.dilation_factor,
)
out = outputsize(layer, (in_size...,1))[1:3]
return (layer, out)
elseif type == "Dense"
layer = Dense(in_size[1], layer_info.filters)
out = (layer_info.filters, in_size[2:3]...)
return (layer, out)
end
end
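# Hypothetical example: a "Convolution" layer_info with filter_size = (3,3),
# filters = 16, stride = 1 and dilation_factor = 1 applied to in_size = (32,32,3)
# returns a Conv layer and out = (32,32,16); SamePad() preserves the spatial
# dimensions at stride 1.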
function getnorm(type::String, layer_info, in_size::Tuple{Int64,Int64,Int64})
if type == "Drop-out"
return Dropout(layer_info.probability)
elseif type == "Batch normalization"
return BatchNorm(in_size[end], ϵ = Float32(layer_info.epsilon))
end
end
function getactivation(type::String, layer_info, in_size::Tuple{Int64,Int64,Int64})
if type == "ReLU"
return Activation(relu)
elseif type == "Leaky ReLU"
return Activation(leakyrelu)
elseif type == "ELU"
return Activation(elu)
elseif type == "Tanh"
return Activation(tanh)
elseif type == "Sigmoid"
return Activation(sigmoid)
end
end
function getpooling(type::String, layer_info, in_size::Tuple{Int64,Int64,Int64})
poolsize = layer_info.poolsize
stride = layer_info.stride
if type == "Max pooling"
layer = MaxPool(poolsize, stride=stride, pad=SamePad())
elseif type == "Average pooling"
layer = MeanPool(poolsize, stride=stride, pad=SamePad())
end
    out12 = ceil.(Int64,in_size[1:2]./stride)
    out = (out12[1],out12[2],in_size[3])
return (layer,out)
end
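# Example: "Max pooling" with poolsize = (2,2) and stride = 2 on in_size = (16,16,8)
# returns out = (8,8,8); the channel dimension is left unchanged.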
function getresizing(type::String, layer_info, in_size)
if type == "Addition"
if in_size[1]==in_size[2]
out = (in_size[1][1], in_size[1][2], in_size[1][3])
return (Addition(), out)
else
msg = string("Cannot add arrays with sizes ",in_size[1]," and ",in_size[2],".")
@error msg
return nothing,nothing
end
elseif type == "Join"
dim = layer_info.dimension
dims = collect(1:3)
deleteat!(dims,dim)
temp_size = map(x-> getindex(x,dims),in_size)
if temp_size[1]==temp_size[2]
new_size = Array{Int64}(undef, length(in_size))
for i = 1:length(in_size)
new_size[i] = in_size[i][dim]
end
new_size = sum(new_size)
if dim == 1
out = (new_size, in_size[1][2], in_size[1][3])
elseif dim == 2
out = (in_size[1][1], new_size, in_size[1][3])
elseif dim == 3
out = (in_size[1][1], in_size[1][2], new_size)
end
return (Join(dim), out)
else
msg = string("Cannot join arrays with sizes ",in_size[1]," and ",in_size[2],".")
@error msg
return nothing,nothing
end
elseif type == "Split"
dim = layer_info.dimension
nout = layer_info.outputs
if dim == 1
out = (in_size[1] / nout, in_size[2:3]...)
elseif dim == 2
out = (in_size[1], in_size[2] / nout, in_size[3])
elseif dim == 3
out = (in_size[1], in_size[2], in_size[3] / nout)
end
if ceil.(out)==out
return (Split(nout, dim), Int64.(out))
else
msg = string("Size should be a tuple of integers.")
@error msg
return nothing,nothing
end
elseif type == "Upsample"
multiplier = layer_info.multiplier
dims = layer_info.dimensions
out = [in_size...]
for i in dims
out[i] = out[i] * multiplier
end
out = (out...,)
return (Upsample(scale = multiplier), out)
elseif type == "Flatten"
out = (prod(in_size), 1, 1)
return (Flatten(), out)
end
end
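# Examples: "Join" along dimension 3 of sizes (16,16,4) and (16,16,8) gives (16,16,12);
# "Split" into 2 outputs along dimension 3 of (16,16,12) gives (16,16,6);
# "Flatten" of (4,4,8) gives (128,1,1).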
function getlayer(layer, in_size)
if layer.group == "linear"
layer_f, out = getlinear(layer.type, layer, in_size)
elseif layer.group == "norm"
layer_f = getnorm(layer.type, layer, in_size)
out = in_size
elseif layer.group == "activation"
layer_f = getactivation(layer.type, layer, in_size)
out = in_size
elseif layer.group == "pooling"
layer_f, out = getpooling(layer.type, layer, in_size)
elseif layer.group == "resizing"
layer_f, out = getresizing(layer.type, layer, in_size)
end
return (layer_f, out)
end
function getbranch(layer_params,in_size)
num = layer_params isa AbstractLayerInfo ? 1 : length(layer_params)
if num==1
layer, in_size = getlayer(layer_params, in_size)
if isnothing(layer)
return nothing,nothing
end
else
par_layers = []
par_size = []
for i = 1:num
if in_size isa Array
temp_size = in_size[i]
else
temp_size = in_size
end
if isempty(layer_params[i])
temp_layers = [Identity()]
else
temp_layers = []
for j = 1:length(layer_params[i])
layer,temp_size = getbranch(layer_params[i][j],temp_size)
if isnothing(layer)
return nothing,nothing
end
push!(temp_layers,layer)
end
end
if length(temp_layers)>1
push!(par_layers,Chain(temp_layers...))
else
push!(par_layers,temp_layers[1])
end
push!(par_size,temp_size)
end
layer = Parallel(tuple,(par_layers...,))
in_size = par_size
end
return layer,in_size
end
function make_model_main(design_data::DesignData)
model_data_design = design_data.ModelData
layers_arranged,_ = get_topology(model_data_design)
if isnothing(layers_arranged)
msg = "Something went wrong during model topology analysis."
@error msg
push!(design_data.warnings, msg)
return false
elseif layers_arranged[end].type!="Output"
msg = "No output layer."
@error msg
push!(design_data.warnings, msg)
return false
end
input_layer_info = layers_arranged[1]
in_size = (input_layer_info.size...,)
model_data_design.input_size = in_size
normalization_ind = input_layer_info.normalization + 1
model_data_design.normalization.f = get_normalization(normalization_ind)
model_data_design.normalization.args = ()
popfirst!(layers_arranged)
loss_ind = layers_arranged[end].loss + 1
model_data_design.loss = get_loss(loss_ind)
pop!(layers_arranged)
model_layers = []
for i = 1:length(layers_arranged)
layer_params = layers_arranged[i]
layer,in_size = getbranch(layer_params,in_size)
if isnothing(layer)
msg = "Something went wrong during Flux model creation."
@error msg
push!(design_data.warnings, msg)
return false
end
push!(model_layers,layer)
end
model_data_design.model = Chain(model_layers...)
return true
end
make_model() = make_model_main(design_data)
function check_model_main(design_data::DesignData)
model_data_design = design_data.ModelData
input = zeros(Float32,model_data_design.input_size...,1)
try
output = model_data_design.model(input)
output_size_temp = size(output)
if length(output_size_temp)==2
output_size = (output_size_temp...,1)
else
output_size = size(output)[1:end-1]
end
model_data_design.output_size = output_size
        if problem_type()==:classification && (output_size[2]!=1 || output_size[3]!=1)
            msg = "Use flatten before an output. Otherwise, the model will not function correctly."
            @error msg
            push!(design_data.warnings,msg)
            return false
        end
    catch e
        @error e
        push!(design_data.warnings,"Something is wrong with your model.")
        return false
    end
    return true
end
check_model() = check_model_main(design_data)
function move_model_main(model_data::ModelData,design_data::DesignData)
model_data2 = design_data.ModelData
model_data.model = deepcopy(model_data2.model)
model_data.layers_info = deepcopy(model_data2.layers_info)
model_data.input_size = model_data2.input_size
model_data.output_size = model_data2.output_size
model_data.loss = model_data2.loss
design_data.ModelData = DesignModelData()
end
move_model() = move_model_main(model_data,design_data)
#---Input normalization--------------------------------------------------------
function get_normalization(ind::Int64)
normalizations = (none,norm_01!,norm_negpos1!,norm_zerocenter!,norm_zscore!)
return normalizations[ind]
end
#---Losses---------------------------------------------------------------------
function get_loss(ind::Int64)
losses = (mae,mse,msle,huber_loss,crossentropy,logitcrossentropy,binarycrossentropy,
logitbinarycrossentropy,kldivergence,poisson_loss,hinge_loss,squared_hinge_loss,
dice_coeff_loss,tversky_loss)
return losses[ind]
end
#---Other----------------------------------------------------------------------
function allcmp(inds)
for i = 1:length(inds)
if inds[1][1] != inds[i][1]
return false
end
end
return true
end
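# Example: allcmp([[1],[1,2],[1]]) == true since every first element is 1,
# while allcmp([[1],[2]]) == false.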
| EasyML | https://github.com/OML-NPA/EasyML.jl.git |
|
[
"MIT"
] | 0.2.0 | baa418970a2eb1b5e82efcc75573343e78cf6e20 | code | 722 |
module Training
# Import packages
using
# Interfacing
CUDA, Qt5Charts_jll,
# Data structuring
Dates,
# Machine learning
Flux, FluxExtra,
# Math functions
Random, StatsBase, LinearAlgebra, Combinatorics, Distances,
# EasyML ecosystem
..Common, ..Common.Training
import CUDA.CuArray, StatsBase.std
import ..Common.DataPreparation: ClassificationData, RegressionData, SegmentationData
# Include functions
include(string(common_dir(),"/common/training_validation.jl"))
include("main.jl")
include("exported_functions.jl")
export training_options, TrainingOptions, training_results_data
export set_weights, set_training_data, set_testing_data, train, remove_training_data, remove_testing_data, remove_training_results
end
| EasyML | https://github.com/OML-NPA/EasyML.jl.git |
|
[
"MIT"
] | 0.2.0 | baa418970a2eb1b5e82efcc75573343e78cf6e20 | code | 7707 |
"""
change(training_options::TrainingOptions)
Opens a GUI where `training_options` can be changed.
"""
function Common.change(data::TrainingOptions)
@qmlfunction(
get_data,
get_options,
set_options,
save_options,
unit_test
)
path_qml = string(@__DIR__,"/gui/TrainingOptions.qml")
gui_dir = string("file:///",replace(@__DIR__, "\\" => "/"),"/gui/")
text = add_templates(path_qml)
loadqml(QByteArray(text), gui_dir = gui_dir)
exec()
return nothing
end
"""
set_training_data(data_input::Vector,data_labels::Vector)
Sets data for training.
"""
function set_training_data(data_input::Vector,data_labels::Vector)
l_input = length(data_input)
l_labels = length(data_labels)
    if l_input!=l_labels
err = string("Input data length does not equal label data length. ",l_input," vs ",l_labels,".")
error(err)
end
if problem_type()==:classification
training_data.ClassificationData.Data.data_input = data_input
training_data.ClassificationData.Data.data_labels = data_labels
elseif problem_type()==:regression
training_data.RegressionData.Data.data_input = data_input
training_data.RegressionData.Data.data_labels = data_labels
else # Segmentation
training_data.SegmentationData.Data.data_input = data_input
training_data.SegmentationData.Data.data_labels = data_labels
end
return nothing
end
"""
set_testing_data(data_input::Vector,data_labels::Vector)
Sets data for testing.
"""
function set_testing_data(data_input::Vector,data_labels::Vector)
l_input = length(data_input)
l_labels = length(data_labels)
    if l_input!=l_labels
err = string("Input data length does not equal label data length. ",l_input," vs ",l_labels,".")
error(err)
end
if problem_type()==:classification
testing_data.ClassificationData.Data.data_input = data_input
testing_data.ClassificationData.Data.data_labels = data_labels
elseif problem_type()==:regression
testing_data.RegressionData.Data.data_input = data_input
testing_data.RegressionData.Data.data_labels = data_labels
else # Segmentation
testing_data.SegmentationData.Data.data_input = data_input
testing_data.SegmentationData.Data.data_labels = data_labels
end
return nothing
end
function get_train_test_inds(num::Int64,fraction::Float64)
inds = randperm(num) # Get shuffled indices
ind_last_test = convert(Int64,round(fraction*num))
inds_train = inds[ind_last_test+1:end]
inds_test = inds[1:ind_last_test]
if isempty(inds_test)
@warn string("Fraction of ",fraction," from ",num,
" files is 0. Increase the fraction of data used for testing to at least ",round(1/num,digits=2),".")
end
return inds_train,inds_test
end
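# Example: get_train_test_inds(10, 0.2) shuffles 1:10 and returns 8 training and
# 2 testing indices; the exact indices depend on the random number generator state.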
function set_testing_data_main(training_data::TrainingData,testing_data::TestingData,training_options::TrainingOptions)
if problem_type()==:classification
specific_training_data = training_data.ClassificationData.Data
specific_testing_data = testing_data.ClassificationData.Data
elseif problem_type()==:regression
specific_training_data = training_data.RegressionData.Data
specific_testing_data = testing_data.RegressionData.Data
else # :segmentation
specific_training_data = training_data.SegmentationData.Data
specific_testing_data = testing_data.SegmentationData.Data
end
num = length(specific_training_data.data_input)
fraction = training_options.Testing.test_data_fraction
inds_train,inds_test = get_train_test_inds(num,fraction)
specific_testing_data.data_input = specific_training_data.data_input[inds_test]
specific_testing_data.data_labels = specific_training_data.data_labels[inds_test]
specific_training_data.data_input = specific_training_data.data_input[inds_train]
specific_training_data.data_labels = specific_training_data.data_labels[inds_train]
return nothing
end
"""
set_testing_data()
Sets aside a fraction of the training data for testing. The fraction is specified in the training options (`training_options.Testing.test_data_fraction`).
"""
set_testing_data() = set_testing_data_main(training_data,testing_data,training_options)
function set_weights_main(ws_in::Vector{<:Real},training_data::TrainingData)
if isempty(ws_in)
training_data.weights = Vector{Float32}(undef,0)
return nothing
end
ws = convert(Vector{Float32},ws_in)
s = sum(ws)
if s==1
training_data.weights = ws
else
training_data.weights = ws/s
end
return nothing
end
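# Hypothetical usage: set_weights([2, 1, 1]) stores Float32[0.5, 0.25, 0.25],
# because the weights are normalized to sum to 1.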
"""
set_weights(ws::Vector{<:Real})
Sets the weights used for weighted accuracy to `ws`. If `sum(ws) ≠ 1`, then the weights are normalized to sum to 1.
If weights are not specified, then the inverse frequency of labels is used.
"""
set_weights(ws) = set_weights_main(ws,training_data)
"""
train()
Opens a GUI where training progress can be observed. Training parameters
such as the number of epochs, the learning rate and the number of tests per epoch
can be changed during training.
"""
function train()
if problem_type()==:classification
data_train = training_data.ClassificationData.Data.data_input
data_test = testing_data.ClassificationData.Data.data_input
elseif problem_type()==:regression
data_train = training_data.RegressionData.Data.data_input
data_test = testing_data.RegressionData.Data.data_input
else # :segmentation
data_train = training_data.SegmentationData.Data.data_input
data_test = testing_data.SegmentationData.Data.data_input
end
if isempty(data_train)
@error "No training data."
return nothing
end
training_data.OptionsData.run_test = !isempty(data_test)
empty_channel(:training_start_progress)
empty_channel(:training_progress)
empty_channel(:training_modifiers)
t = train_main2(model_data,all_data,options,channels)
# Launches GUI
@qmlfunction(
# Data handling
set_data,
get_data,
get_options,
get_progress,
put_channel,
# Training related
set_training_starting_time,
training_elapsed_time,
# Other
yield,
time,
unit_test
)
path_qml = string(@__DIR__,"/gui/TrainingPlot.qml")
gui_dir = string("file:///",replace(@__DIR__, "\\" => "/"),"/gui/")
text = add_templates(path_qml)
loadqml(QByteArray(text), gui_dir = gui_dir)
exec()
state,err = check_task(t)
if state==:error
@warn string("Training aborted due to the following error: ",err)
end
return training_data.Results
end
function remove_data(some_data::T) where T<:Union{TrainingData,TestingData}
fields = [:data_input,:data_labels]
for field in fields
empty!(getfield(some_data.ClassificationData.Data,field))
empty!(getfield(some_data.RegressionData.Data,field))
empty!(getfield(some_data.SegmentationData.Data,field))
end
fields = fieldnames(T)[4:end]
for field in fields
data = getfield(some_data,field)
if data isa Array
empty!(data)
end
end
return nothing
end
"""
remove_training_data()
Removes all training data except for results.
"""
remove_training_data() = remove_data(training_data)
"""
remove_testing_data()
Removes all testing data.
"""
remove_testing_data() = remove_data(testing_data)
"""
remove_training_results()
Removes training results.
"""
function remove_training_results()
data = training_data.Results
fields = fieldnames(TrainingResultsData)
for field in fields
empty!(getfield(data, field))
end
end | EasyML | https://github.com/OML-NPA/EasyML.jl.git |
|
[
"MIT"
] | 0.2.0 | baa418970a2eb1b5e82efcc75573343e78cf6e20 | code | 25983 |
# Returns the current date and time as a formatted string
function time()
date = string(now())
date = date[1:19]
date = replace(date,"T"=>" ")
return date
end
function set_training_starting_time_main(training_plot_data::TrainingPlotData)
training_plot_data.starting_time = now()
return nothing
end
set_training_starting_time() =
set_training_starting_time_main(training_plot_data)
# Calculates the time elapsed from the beginning of training
function training_elapsed_time_main(training_plot_data::TrainingPlotData)
dif = (now() - training_plot_data.starting_time).value
hours = string(Int64(floor(dif/3600000)))
minutes_num = floor(dif/60000)
minutes = string(Int64(minutes_num - floor(minutes_num/60)*60))
if length(minutes)<2
minutes = string("0",minutes)
end
seconds_num = round(dif/1000)
seconds = string(Int64(seconds_num - floor(seconds_num/60)*60))
if length(seconds)<2
seconds = string("0",seconds)
end
return string(hours,":",minutes,":",seconds)
end
training_elapsed_time() = training_elapsed_time_main(training_plot_data)
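# Example: an elapsed time of 3725000 ms is formatted as "1:02:05".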
#---
# Creates data sets for training and testing
function get_sets(norm_func::Function, typed_training_data::T,typed_testing_data::T) where
T<:Union{ClassificationData,RegressionData,SegmentationData}
    # Apply the normalization to every input array (the norm functions operate in place)
    foreach(norm_func, typed_training_data.Data.data_input)
    foreach(norm_func, typed_testing_data.Data.data_input)
train_set = (typed_training_data.Data.data_input,typed_training_data.Data.data_labels)
test_set = (typed_testing_data.Data.data_input,typed_testing_data.Data.data_labels)
return train_set, test_set
end
# Creates indices for minibatch generation
function make_minibatch_inds(num_data::Int64,batch_size::Int64)
# Calculate final index
num = num_data - batch_size
val = Int64(max(0.0,floor(num/batch_size)))
finish = val*batch_size
# Get indices
inds_start = collect(0:batch_size:finish)
inds_all = collect(1:num_data)
# Number of indices
num = length(inds_start)
return inds_start,inds_all,num
end
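# Example: make_minibatch_inds(100, 32) returns inds_start == [0, 32, 64],
# inds_all == collect(1:100) and num == 3, i.e. three full minibatches of 32
# (the remaining 4 samples are not used during that epoch).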
#----------------
function make_minibatch(data_input::Vector{Array{Float32,N}},data_labels::Vector{Int32},
max_labels::Int64,batch_size::Int64,inds_start::Vector{Int64},
inds_all::Vector{Int64},i::Int64) where N
ind = inds_start[i]
# First and last minibatch indices
ind1 = ind+1
ind2 = ind+batch_size
# Get inputs and labels
current_inds = inds_all[ind1:ind2]
l = length(current_inds)
current_input = Vector{Array{Float32,N+1}}(undef,l)
current_labels = Vector{Array{Float32,2}}(undef,l)
for j = 1:l
ind = current_inds[j]
temp = zeros(Float32,max_labels)
ind_temp = data_labels[ind]
temp[ind_temp] = 1
current_labels[j] = add_dim(temp)
current_input[j] = add_dim(data_input[ind])
end
    # Concatenating inputs and labels
ncat = (x,y)->cat(x,y, dims=Val(N+1))
current_input_cat = reduce(ncat,current_input)
current_labels_cat = reduce(hcat,current_labels)
# Form a minibatch
return (current_input_cat,current_labels_cat)
end
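# Example: for max_labels = 3, a label 2 is one-hot encoded as Float32[0, 1, 0]
# before being concatenated along the last dimension into the minibatch.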
function make_minibatch(data_input::Vector{Array{Float32,N1}},data_labels::Vector{Array{Float32,N2}},
max_labels::Int64,batch_size::Int64,inds_start::Vector{Int64},
inds_all::Vector{Int64},i::Int64) where {N1,N2}
ind = inds_start[i]
# First and last minibatch indices
ind1 = ind+1
ind2 = ind+batch_size
# Get inputs and labels
current_inds = inds_all[ind1:ind2]
l = length(current_inds)
current_input = Vector{Array{Float32,N1+1}}(undef,l)
current_labels = Vector{Array{Float32,N2+1}}(undef,l)
for j = 1:l
ind = current_inds[j]
current_labels[j] = add_dim(data_labels[ind])
current_input[j] = add_dim(data_input[ind])
end
    # Concatenating inputs and labels
ncat1 = (x,y)->cat(x,y, dims=Val(N1+1))
ncat2 = (x,y)->cat(x,y, dims=Val(N2+1))
input_cat = reduce(ncat1,current_input)
labels_cat = reduce(ncat2,current_labels)
return (input_cat,labels_cat)
end
function make_minibatch(data_input::Vector{Array{Float32,N1}},data_labels_bool::Vector{BitArray{N2}},
max_labels::Int64,batch_size::Int64,inds_start::Vector{Int64},
inds_all::Vector{Int64},i::Int64) where {N1,N2}
ind = inds_start[i]
# First and last minibatch indices
ind1 = ind+1
ind2 = ind+batch_size
# Get inputs and labels
current_inds = inds_all[ind1:ind2]
l = length(current_inds)
current_input = Vector{Array{Float32,N1+1}}(undef,l)
current_labels = Vector{Array{Float32,N2+1}}(undef,l)
for j = 1:l
ind = current_inds[j]
data_labels_temp = convert(Array{Float32,N2},data_labels_bool[ind])
current_labels[j] = add_dim(data_labels_temp)
current_input[j] = add_dim(data_input[ind])
end
    # Concatenating inputs and labels
ncat1 = (x,y)->cat(x,y, dims=Val(N1+1))
ncat2 = (x,y)->cat(x,y, dims=Val(N2+1))
input_cat = reduce(ncat1,current_input)
labels_cat = reduce(ncat2,current_labels)
# Form a minibatch
minibatch = (input_cat,labels_cat)
return minibatch
end
#----------------
# Reset training related data accumulators
function reset_training_data(training_data::TrainingData)
training_data.warnings = String[]
training_data.errors = String[]
training_plot_data = training_data.PlotData
training_results_data = training_data.Results
training_results_data.accuracy = Float32[]
training_results_data.loss = Float32[]
training_results_data.test_accuracy = Float32[]
training_results_data.test_loss = Float32[]
training_plot_data.iteration = 0
training_plot_data.epoch = 0
training_plot_data.iterations_per_epoch = 0
training_plot_data.starting_time = now()
training_plot_data.max_iterations = 0
training_plot_data.learning_rate_changed = false
return nothing
end
function clean_up_training(training_plot_data::TrainingPlotData)
training_plot_data.iteration = 0
training_plot_data.epoch = 0
training_plot_data.iterations_per_epoch = 0
training_plot_data.starting_time = now()
training_plot_data.max_iterations = 0
training_plot_data.learning_rate_changed = false
end
#---
# Returns an optimiser with preset parameters
function get_optimiser(training_options::TrainingOptions)
# List of possible optimisers
optimisers = (Descent,Momentum,Nesterov,RMSProp,ADAM,
RADAM,AdaMax,ADAGrad,ADADelta,AMSGrad,NADAM,ADAMW)
optimiser_names = (:Descent,:Momentum,:Nesterov,:RMSProp,:ADAM,
:RADAM,:AdaMax,:ADAGrad,:ADADelta,:AMSGrad,:NADAM,:ADAMW)
# Get optimiser index
optimiser_ind = findfirst(training_options.Hyperparameters.optimiser.==optimiser_names)
# Get optimiser parameters
parameters_in = training_options.Hyperparameters.optimiser_params
# Get learning rate
learning_rate = training_options.Hyperparameters.learning_rate
# Collect optimiser parameters and learning rate
if length(parameters_in)==0
parameters = [learning_rate]
elseif length(parameters_in)==1
parameters = [learning_rate,parameters_in[1]]
elseif length(parameters_in)==2
parameters = [learning_rate,(parameters_in[1],parameters_in[2])]
else
parameters = [learning_rate,(parameters_in[1],parameters_in[2]),parameters_in[3]]
end
# Get optimiser function
optimiser_func = optimisers[optimiser_ind]
# Initialize optimiser with parameters
optimiser = optimiser_func(parameters...)
return optimiser
end
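# Hypothetical example: with optimiser = :ADAM, learning_rate = 1e-3 and
# optimiser_params = [0.9, 0.999], the returned optimiser is ADAM(0.001, (0.9, 0.999)).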
#---Weights---------------------------------------------------------------------
function calculate_weights(counts::Vector{Int64})
frequencies = counts./sum(counts)
inv_frequencies = 1 ./frequencies
weights64 = inv_frequencies./sum(inv_frequencies)
weights = convert(Vector{Float32},weights64)
return weights
end
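# Example: calculate_weights([10, 90]) returns Float32[0.9, 0.1]; rarer classes
# receive proportionally larger weights.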
function get_weights(model_data::ModelData,classification_data::ClassificationData)
data_labels = classification_data.Data.data_labels
num = length(model_data.classes)
counts = zeros(Int64,num)
for data in data_labels
counts[data] += 1
end
weights = calculate_weights(counts)
return weights
end
function get_weights(model_data::ModelData,regression_data::RegressionData)
weights = Vector{Float32}(undef,0)
return weights
end
function get_weights(model_data::ModelData,segmentation_data::SegmentationData)
data_labels = segmentation_data.Data.data_labels
num = size(data_labels[1],3)
counts = zeros(Int64,num)
for data in data_labels
counts .+= collect(Iterators.flatten(sum(data,dims = [1,2])))
end
weights = calculate_weights(counts)
return weights
end
#---
function minibatch_part(data_input,data_labels,max_labels,epochs,num,inds_start,inds_all,
counter,run_test,data_input_test,data_labels_test,inds_start_test,inds_all_test,counter_test,
num_test,batch_size,minibatch_channel,minibatch_test_channel,testing_mode,abort)
epoch_idx = 1
iteration_local = 0
iteration_test_local = 0
# Data preparation
while true
# Shuffle indices
inds_start_sh = shuffle!(inds_start)
inds_all_sh = shuffle!(inds_all)
if run_test
inds_start_test_sh = shuffle!(inds_start_test)
inds_all_test_sh = shuffle!(inds_all_test)
end
cnt = 0
while true
while epoch_idx<=epochs[]
numel_channel = (iteration_local-counter.iteration)
if numel_channel<10
iteration_local += 1
cnt += 1
minibatch = make_minibatch(data_input,data_labels,max_labels,batch_size,
inds_start_sh,inds_all_sh,cnt)
put!(minibatch_channel,minibatch)
break
elseif run_test && testing_mode[]
break
else
sleep(0.01)
end
end
if run_test && testing_mode[]
cnt_test = 0
while true
numel_test_channel = (iteration_test_local-counter_test.iteration)
if numel_test_channel<10
cnt_test += 1
iteration_test_local += 1
minibatch = make_minibatch(data_input_test,data_labels_test,max_labels,batch_size,
inds_start_test_sh,inds_all_test_sh,cnt_test)
put!(minibatch_test_channel,minibatch)
else
sleep(0.01)
end
if cnt_test==num_test
Threads.atomic_xchg!(testing_mode, false)
break
end
end
end
sleep(0.01)
if abort[]
return nothing
end
if cnt==num
break
end
end
# Update epoch counter
epoch_idx += 1
end
return nothing
end
function check_modifiers(model_data,model,model_name,accuracy_vector,
loss_vector,allow_lr_change,composite,opt,i,num,epochs,max_iterations,
num_tests,global_iteration_test,modifiers_channel,abort;gpu=false)
while isready(modifiers_channel)
modifs = take!(modifiers_channel)
modif1 = modifs[1]
if modif1==0 # stop
Threads.atomic_xchg!(abort, true)
# Save model
if gpu==true
model_data.model = cpu(model)
else
model_data.model = model
end
save_model(model_name)
break
elseif modif1==1 # learning rate
if allow_lr_change
if composite
opt[1].eta = convert(Float64,modifs[2])
else
opt.eta = convert(Float64,modifs[2])
end
end
elseif modif1==2 # epochs
new_epochs::Int64 = convert(Int64,modifs[2])
new_max_iterations::Int64 = convert(Int64,new_epochs*num)
Threads.atomic_xchg!(epochs, new_epochs)
Threads.atomic_xchg!(max_iterations, new_max_iterations)
resize!(accuracy_vector,max_iterations[])
resize!(loss_vector,max_iterations[])
elseif modif1==3 # number of tests
num_tests::Float64 = modifs[2]
frequency = num/num_tests
global_iteration_test = floor(i/frequency)
end
end
return num_tests,global_iteration_test
end
function teach!(model::Chain,loss::Function,opt,input_data::AbstractArray{Float32},actual::AbstractArray{Float32})
local predicted
local loss_val
    ps = Flux.params(model)
# Calculate gradient
gs = gradient(ps) do
predicted = model(input_data)
loss_val = loss(predicted,actual)
end
# Update weights
Flux.Optimise.update!(opt,ps,gs)
return predicted,loss_val
end
function training_part(model_data,model,model_name,opt,accuracy,loss,T_out,move_f,
accuracy_vector,loss_vector,counter,accuracy_test_vector,loss_test_vector,
iteration_test_vector,counter_test,num_test,epochs,num,max_iterations,
num_tests,allow_lr_change,composite,run_test,minibatch_channel,
minibatch_test_channel,channels,use_GPU,testing_mode,abort)
epoch_idx = 1
while epoch_idx<=epochs[]
global_iteration_test = 0
for i = 1:num
counter()
iteration = counter.iteration
# Prepare training data
local minibatch_data::eltype(minibatch_channel.data)
while true
# Update parameters or abort if needed
if isready(channels.training_modifiers)
                    num_tests,global_iteration_test = check_modifiers(model_data,model,model_name,
accuracy_vector,loss_vector,allow_lr_change,composite,opt,i,num,epochs,
max_iterations,num_tests,global_iteration_test,
channels.training_modifiers,abort;gpu=use_GPU)
if abort[]==true
return nothing
end
end
if isready(minibatch_channel)
minibatch_data = take!(minibatch_channel)
break
else
sleep(0.01)
end
end
input_data = move_f(minibatch_data[1])
actual = move_f(minibatch_data[2])
# Teach the model
predicted,loss_val = teach!(model,loss,opt,input_data,actual)
# Calculate accuracy
accuracy_val = accuracy(predicted,actual)
# Return training information
put!(channels.training_progress,("Training",accuracy_val,loss_val,iteration))
accuracy_vector[iteration] = accuracy_val
loss_vector[iteration] = loss_val
# Testing part
if run_test && num_tests!=0
training_started_cond = i==1 && epoch_idx==1
num_tests_cond = i>global_iteration_test*ceil(num/num_tests)
training_finished_cond = iteration==(max_iterations[])
# Test if testing frequency reached or training is done
if num_tests_cond || training_started_cond || training_finished_cond
global_iteration_test += 1
Threads.atomic_xchg!(testing_mode, true)
# Calculate test accuracy and loss
data_test = test(model,accuracy,loss,minibatch_test_channel,counter_test,num_test,move_f,abort)
# Return testing information
put!(channels.training_progress,("Testing",data_test...,iteration))
push!(accuracy_test_vector,data_test[1])
push!(loss_test_vector,data_test[2])
push!(iteration_test_vector,iteration)
end
end
GC.safepoint()
cleanup!(predicted)
end
# Update epoch counter
epoch_idx += 1
# Save model
model_data.model = cpu(model)
save_model(model_name)
end
return nothing
end
function test(model::Chain,accuracy::Function,loss::Function,minibatch_test_channel::Channel,
counter_test,num_test::Int64,move_f,abort)
test_accuracy = Vector{Float32}(undef,num_test)
test_loss = Vector{Float32}(undef,num_test)
local minibatch_test_data::eltype(minibatch_test_channel.data)
for j=1:num_test
while true
# Abort if needed
if abort[]==true
return [0.f0,0.f0]
end
if isready(minibatch_test_channel)
minibatch_test_data = take!(minibatch_test_channel)
break
else
sleep(0.01)
end
end
# Update test counter
counter_test()
test_minibatch = move_f.(minibatch_test_data)
predicted = model(test_minibatch[1])
actual = test_minibatch[2]
test_accuracy[j] = accuracy(predicted,actual)
test_loss[j] = loss(predicted,actual)
cleanup!(predicted)
end
data = [mean(test_accuracy),mean(test_loss)]
return data
end
function check_lr_change(opt,composite)
if !composite
allow_lr_change = hasproperty(opt, :eta)
else
        allow_lr_change = hasproperty(opt[1], :eta)
end
return convert(Bool,allow_lr_change)
end
mutable struct Counter
iteration::Int
Counter() = new(0)
end
(c::Counter)() = (c.iteration += 1)
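# Example: c = Counter(); c(); c() leaves c.iteration == 2.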
function train!(model_data::ModelData,train_set::Tuple{T1,T2},test_set::Tuple{T1,T2},
opt,accuracy::Function,loss::Function,all_data::AllData,use_GPU::Bool,
num_tests::Float64,args::HyperparametersOptions,channels::Channels,
tasks::Vector{Task}) where {N1,N2,T1<:Vector{Array{Float32,N1}},
T2<:Union{Array{Int32,N2},Vector{Array{Float32,N2}},Vector{BitArray{N2}}}}
# Initialize constants
epochs = Threads.Atomic{Int64}(args.epochs)
batch_size = args.batch_size
accuracy_vector = Vector{Float32}(undef,0)
loss_vector = Vector{Float32}(undef,0)
accuracy_test_vector = Vector{Float32}(undef,0)
loss_test_vector = Vector{Float32}(undef,0)
iteration_test_vector = Vector{Int64}(undef,0)
max_iterations = Threads.Atomic{Int64}(0)
counter = Counter()
counter_test = Counter()
run_test = length(test_set[1])!=0 && num_tests!=0
composite = hasproperty(opt, :os)
allow_lr_change = check_lr_change(opt,composite)
abort = Threads.Atomic{Bool}(false)
testing_mode = Threads.Atomic{Bool}(true)
model_name = string("models/",all_data.Urls.model_name,".model")
# Initialize data
data_input = train_set[1]
data_labels = train_set[2]
num_data = length(data_input)
inds_start,inds_all,num = make_minibatch_inds(num_data,batch_size)
data_input_test = test_set[1]
data_labels_test = test_set[2]
num_data_test = length(data_input_test)
inds_start_test,inds_all_test,num_test = make_minibatch_inds(num_data_test,batch_size)
Threads.atomic_xchg!(max_iterations, epochs[]*num)
# Return epoch information
resize!(accuracy_vector,max_iterations[])
resize!(loss_vector,max_iterations[])
put!(channels.training_start_progress,(epochs[],num,max_iterations[]))
max_labels = length(model_data.classes)
# Make channels
output_N = N2+1
minibatch_channel = Channel{Tuple{Array{Float32,N1+1},Array{Float32,output_N}}}(Inf)
minibatch_test_channel = Channel{Tuple{Array{Float32,N1+1},Array{Float32,output_N}}}(Inf)
# Data preparation thread
t = Threads.@spawn minibatch_part(data_input,data_labels,max_labels,epochs,num,inds_start,
inds_all,counter,run_test,data_input_test,data_labels_test,inds_start_test,
inds_all_test,counter_test,num_test,batch_size,minibatch_channel,minibatch_test_channel,testing_mode,abort)
push!(tasks,t)
# Training thread
if use_GPU
T_out = CuArray{Float32,output_N}
model = Flux.gpu(model_data.model)
move_f = CuArray
else
T_out = Array{Float32,output_N}
model = model_data.model
move_f = Identity()
end
training_part(model_data,model,model_name,opt,accuracy,loss,T_out,move_f,accuracy_vector,
loss_vector,counter,accuracy_test_vector,loss_test_vector,iteration_test_vector,
counter_test,num_test,epochs,num,max_iterations,num_tests,allow_lr_change,composite,
run_test,minibatch_channel,minibatch_test_channel,channels,use_GPU,testing_mode,abort)
# Stop data preparation thread
Threads.atomic_xchg!(abort, true)
# Return training information
resize!(accuracy_vector,counter.iteration)
resize!(loss_vector,counter.iteration)
data = (accuracy_vector,loss_vector,accuracy_test_vector,loss_test_vector,iteration_test_vector)
return data
end
function cleanup!(x::Array)
return nothing
end
function cleanup!(x::CuArray)
CUDA.unsafe_free!(x)
return nothing
end
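# cleanup! frees GPU buffers eagerly instead of waiting for the garbage collector
# to release device memory.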
function get_data_struct(some_data::Union{TrainingData,TestingData})
if problem_type()==:classification
data = some_data.ClassificationData
elseif problem_type()==:regression
data = some_data.RegressionData
else # problem_type()==:segmentation
data = some_data.SegmentationData
end
return data
end
function test_model(model_data,train_set,errors,use_GPU)
input_data_raw = train_set[1][1]
if problem_type()==:classification
max_labels = length(model_data.classes)
label_data = zeros(Float32,max_labels)
ind = train_set[2][1]
label_data[ind] = 1
else
label_data = train_set[2][1]
end
if use_GPU
model = gpu(model_data.model)
input_data = CuArray(add_dim(input_data_raw))
else
model = model_data.model
input_data = add_dim(input_data_raw)
end
output = model(input_data)
size_label = size(label_data)
size_output = size(output)[1:end-1]
if size_label!=size_output
err = string("Label data has size ",size_label," while data returned by the model has size ",size_output,".")
push!(errors,err)
error(err)
return true
else
model_data.input_size = size(input_data)[1:end-1]
model_data.output_size = size_label
return false
end
end
# Main training function
function train_main(model_data::ModelData,all_data::AllData,options::Options,channels::Channels)
# Initialization
GC.gc()
training_data = all_data.TrainingData
testing_data = all_data.TestingData
training_options = options.TrainingOptions
args = training_options.Hyperparameters
use_GPU = false
if options.GlobalOptions.HardwareResources.allow_GPU
if has_cuda()
use_GPU = true
else
warning = "No CUDA capable device was detected. Using CPU instead."
@warn warning
push!(training_data.warnings,warning)
end
end
reset_training_data(training_data)
# Check save directory
if isempty(all_data.Urls.model_url)
all_data.Urls.model_url = "models/new_model.model"
all_data.Urls.model_name = "new_model"
end
# Preparing train and test sets
typed_training_data = get_data_struct(training_data)
typed_testing_data = get_data_struct(testing_data)
normalization = model_data.normalization
norm_func(x) = normalization.f(x,normalization.args...)
train_set, test_set = get_sets(norm_func,typed_training_data,typed_testing_data)
# Data testing
if args.batch_size>length(train_set[1])
err = string("Input data size (",length(train_set[1]),") is smaller than the batch size (",args.batch_size,").")
push!(training_data.errors,err)
error(err)
end
# Model testing
excp = test_model(model_data,train_set,training_data.errors,use_GPU)
if excp
return nothing
end
# Setting functions and parameters
opt = get_optimiser(training_options)
local ws::Vector{Float32}
if training_options.Accuracy.weight_accuracy
if training_options.Accuracy.accuracy_mode==:manual
ws = training_data.weights
l_ws = length(ws)
l_data = size(train_set[2][1])[end]
if l_ws!=l_data
error = string("The number of weights is not equal to the number of channels. ",l_ws," vs ",l_data)
@error error
push!(training_data.errors,error)
return nothing
end
else
ws = get_weights(model_data,typed_training_data)
end
else
ws = Vector{Float32}(undef,0)
end
accuracy = get_accuracy_func(ws,training_options)
loss = model_data.loss
num_tests = training_options.Testing.num_tests
# Run training
data = train!(model_data,train_set,test_set,opt,accuracy,loss,
all_data,use_GPU,num_tests,args,channels,training_data.tasks)
# Clean up
clean_up_training(training_data.PlotData)
# Return training results
training_results_data = training_data.Results
training_results_data.accuracy = data[1]
training_results_data.loss = data[2]
training_results_data.test_accuracy = data[3]
training_results_data.test_loss = data[4]
training_results_data.test_iteration = data[5]
save_model(all_data.Urls.model_url)
return nothing
end
function train_main2(model_data::ModelData,all_data::AllData,options::Options,channels::Channels)
t = Threads.@spawn train_main(model_data,all_data,options,channels)
push!(all_data.TrainingData.tasks,t)
return t
end
| EasyML | https://github.com/OML-NPA/EasyML.jl.git |
|
[
"MIT"
] | 0.2.0 | baa418970a2eb1b5e82efcc75573343e78cf6e20 | code | 1074 |
module Validation
# Import packages
using
# Interfacing
CxxWrap, CUDA,
# Data import/export
FileIO, ImageIO, XLSX, CSVFiles,
# Data manipulation
Unicode, DataFrames,
# Image manipulation
Images, ColorTypes,
# Machine learning
Flux, Flux.Losses, FluxExtra,
# Math functions
Random, StatsBase, LinearAlgebra,
# Other
FLoops,
# EasyML ecosystem
..Common, ..Common.Classes, ..Common.Validation
import CUDA.CuArray, StatsBase.std
import ..Classes
import ..Classes: change_classes, num_classes, get_class_field, get_class_data,
get_problem_type, get_input_type
# Include functions
include(string(common_dir(),"/common/training_validation.jl"))
include(string(common_dir(),"/common/validation_application.jl"))
include(string(common_dir(),"/common/preparation_validation.jl"))
include(string(common_dir(),"/common/preparation_validation_application.jl"))
include("main.jl")
include("exported_functions.jl")
export validation_options, ValidationOptions, validation_results_data
export get_urls_validation, validate, remove_validation_data, remove_validation_results
end
| EasyML | https://github.com/OML-NPA/EasyML.jl.git |