licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"MPL-2.0"
] | 0.5.6 | 576c5936c2017dc614ae8001efd872d50909e041 | code | 4152 | # run this script to update the database
# please open a pull request at
# https://github.com/JuliaSmoothOptimizers/SuiteSparseMatrixCollection.jl/pulls
using DataFrames
using Dates
using JLD2
using MAT
# Maps the raw field names found in the SuiteSparse `ss_index.mat` index to
# the snake_case column names used in the DataFrame stored in ssmc.jld2.
const colnames = Dict(
    "Group" => "group",
    "nnzdiag" => "nnzdiag",
    "nrows" => "nrows",
    "numerical_symmetry" => "numerical_symmetry",
    "amd_vnz" => "amd_vnz",
    "isBinary" => "binary",
    "sprank" => "structural_rank",
    "isND" => "is_nd",
    "isGraph" => "is_graph",
    "RBtype" => "RB_type",
    "lowerbandwidth" => "lower_bandwidth",
    "nzero" => "explicit_zeros",
    "amd_flops" => "amd_flops",
    "xmin" => "xmin",
    "posdef" => "positive_definite",
    "ncc" => "connected_components",
    "Name" => "name",
    "amd_rnz" => "amd_rnz",
    "nentries" => "pattern_entries",
    "ncols" => "ncols",
    "rcm_lowerbandwidth" => "rcm_lower_bandwidth",
    "amd_lnz" => "amd_lnz",
    "nnz" => "nnz",
    "pattern_symmetry" => "pattern_symmetry",
    "rcm_upperbandwidth" => "rcm_upper_bandwidth",
    "upperbandwidth" => "upper_bandwidth",
    "cholcand" => "cholesky_candidate",
    "xmax" => "xmax",
    "isReal" => "real",
    "nblocks" => "nblocks",
)
# Target Julia element type for each DataFrame column (keyed by the
# snake_case names produced via `colnames`); used when flattening the raw
# MAT-file arrays with `to_vec`.
const coltypes = Dict(
    "group" => String,
    "nnzdiag" => Int,
    "nrows" => Int,
    "numerical_symmetry" => Float64,
    "amd_vnz" => Int,
    "binary" => Bool,
    "structural_rank" => Int,
    "is_nd" => Bool,
    "is_graph" => Bool,
    "RB_type" => String,
    "lower_bandwidth" => Int,
    "explicit_zeros" => Int,
    "amd_flops" => Float64,
    "xmin" => Complex{Float64},
    "positive_definite" => Bool,
    "connected_components" => Int,
    "name" => String,
    "amd_rnz" => Int,
    "pattern_entries" => Int,
    "ncols" => Int,
    "rcm_lower_bandwidth" => Int,
    "amd_lnz" => Int,
    "nnz" => Int,
    "pattern_symmetry" => Float64,
    "rcm_upper_bandwidth" => Int,
    "upper_bandwidth" => Int,
    "cholesky_candidate" => Bool,
    "xmax" => Complex{Float64},
    "real" => Bool,
    "nblocks" => Int,
)
# Location of the SuiteSparse collection and of the local copy of its index.
const ssmc_url = "https://sparse.tamu.edu"
const ssmc_mat = "ss_index.mat"
const ssmc_db = joinpath(@__DIR__, ssmc_mat)
const mat_url = "http://sparse.tamu.edu/files/$ssmc_mat"

# Fetch the latest MAT-file index of the collection next to this script.
# NOTE(review): `download` (Base) is deprecated on recent Julia in favor of
# `Downloads.download` -- confirm the minimum supported Julia version.
try
    @info "attempting to download updated SuiteSparse database"
    download(mat_url, ssmc_db)
catch e
    @error e
    error("unable to download SuiteSparse database")
end

# The MAT file's "ss_index" entry holds all matrix metadata plus two
# timestamp fields used below.
matdata = matread(ssmc_db)
ss_index = matdata["ss_index"]
download_time = ss_index["DownloadTimeStamp"]
last_rev_date = ss_index["LastRevisionDate"]

# The processed database ships with the package as a JLD2 file.
ssmc_jld2 = joinpath(@__DIR__, "..", "src", "ssmc.jld2")

# Rebuild the JLD2 file only when the upstream revision is newer than the one
# recorded on file (or when no local file exists yet).
update = false
const dfmt = DateFormat("dd-uuu-yyyy HH:MM:SS")  # e.g. "12-Mar-2024 10:00:00"
last_rev = DateTime(last_rev_date, dfmt)
if isfile(ssmc_jld2)
    file = jldopen(ssmc_jld2, "r")
    last_rev_date_on_file = file["last_rev_date"]
    close(file)
    last_rev_on_file = DateTime(last_rev_date_on_file, dfmt)
    update = last_rev > last_rev_on_file
else
    update = true
end
"""
    to_vec(x, T)

Flatten `x` into a vector. A 1-d array is returned unchanged; a 2-d array is
flattened in iteration (column-major) order into a freshly allocated
`Vector{T}`. Arrays with more than two dimensions raise an error.
"""
function to_vec(x, T)
    nd = ndims(x)
    nd > 2 && error("too many dims!")
    nd == 1 && return x
    # Collect the 2-d array's elements column-major into a Vector{T}.
    # (The original source had a mojibake-garbled `∈` here; fixed to `in`.)
    y = T[]
    sizehint!(y, length(x))
    for val in x
        push!(y, val)
    end
    return y
end
# Rebuild the DataFrame from the raw index and rewrite the JLD2 database.
# (Both `for ... in` keywords below were mojibake-garbled `∈` characters in
# the original source; fixed so the script parses.)
if update
    @info "updating database to revision of" last_rev
    df = DataFrame()
    for (k, v) in ss_index
        # Skip the two timestamp entries: they are metadata, not columns.
        k in ("DownloadTimeStamp", "LastRevisionDate") && continue
        colname = colnames[k]
        setproperty!(df, colname, to_vec(v, coltypes[colname]))
    end
    # Impose a canonical column order on the resulting DataFrame.
    df = df[
        !,
        [
            :group,
            :name,
            :nrows,
            :ncols,
            :nnz,
            :structural_rank,
            :numerical_symmetry,
            :positive_definite,
            :pattern_symmetry,
            :binary,
            :real,
            :nnzdiag,
            :xmin,
            :xmax,
            :lower_bandwidth,
            :upper_bandwidth,
            :rcm_lower_bandwidth,
            :rcm_upper_bandwidth,
            :amd_vnz,
            :amd_lnz,
            :amd_rnz,
            :explicit_zeros,
            :pattern_entries,
            :cholesky_candidate,
            :connected_components,
            :nblocks,
            :amd_flops,
            :RB_type,
            :is_graph,
            :is_nd,
        ],
    ]
    # Persist the DataFrame along with the revision metadata used above to
    # decide whether a future run needs to regenerate the file.
    jldopen(ssmc_jld2, "w") do file
        file["df"] = df
        file["last_rev_date"] = last_rev_date
        file["download_time"] = download_time
    end
    @info "please open a pull request at https://github.com/JuliaSmoothOptimizers/SuiteSparseMatrixCollection.jl/pulls"
else
    @info "database is already up to date"
end
| SuiteSparseMatrixCollection | https://github.com/JuliaSmoothOptimizers/SuiteSparseMatrixCollection.jl.git |
|
[
"MPL-2.0"
] | 0.5.6 | 576c5936c2017dc614ae8001efd872d50909e041 | docs | 6655 | # SuiteSparseMatrixCollection.jl
| **Documentation** | **Linux/macOS/Windows/FreeBSD** | **Coverage** | **DOI** |
|:-----------------:|:-------------------------------:|:------------:|:-------:|
| [![docs-stable][docs-stable-img]][docs-stable-url] [![docs-dev][docs-dev-img]][docs-dev-url] | [![build-gh][build-gh-img]][build-gh-url] [![build-cirrus][build-cirrus-img]][build-cirrus-url] | [![codecov][codecov-img]][codecov-url] | [![doi][doi-img]][doi-url] |
[docs-stable-img]: https://img.shields.io/badge/docs-stable-blue.svg
[docs-stable-url]: https://JuliaSmoothOptimizers.github.io/SuiteSparseMatrixCollection.jl/stable
[docs-dev-img]: https://img.shields.io/badge/docs-dev-purple.svg
[docs-dev-url]: https://JuliaSmoothOptimizers.github.io/SuiteSparseMatrixCollection.jl/dev
[build-gh-img]: https://github.com/JuliaSmoothOptimizers/SuiteSparseMatrixCollection.jl/workflows/CI/badge.svg?branch=main
[build-gh-url]: https://github.com/JuliaSmoothOptimizers/SuiteSparseMatrixCollection.jl/actions
[build-cirrus-img]: https://img.shields.io/cirrus/github/JuliaSmoothOptimizers/SuiteSparseMatrixCollection.jl?logo=Cirrus%20CI
[build-cirrus-url]: https://cirrus-ci.com/github/JuliaSmoothOptimizers/SuiteSparseMatrixCollection.jl
[codecov-img]: https://codecov.io/gh/JuliaSmoothOptimizers/SuiteSparseMatrixCollection.jl/branch/main/graph/badge.svg
[codecov-url]: https://app.codecov.io/gh/JuliaSmoothOptimizers/SuiteSparseMatrixCollection.jl
[doi-img]: https://img.shields.io/badge/DOI-10.5281%2Fzenodo.4324340-blue.svg
[doi-url]: https://doi.org/10.5281/zenodo.4324340
A straightforward interface to the [SuiteSparse Matrix Collection](https://sparse.tamu.edu/).
## References
> Davis, Timothy A. and Hu, Yifan (2011).
> The University of Florida sparse matrix collection.
> ACM Transactions on Mathematical Software, 38(1), 1--25.
> [10.1145/2049662.2049663](https://doi.org/10.1145/2049662.2049663)
## How to Cite
If you use SuiteSparseMatrixCollection.jl in your work, please cite using the format given in [CITATION.bib](https://github.com/JuliaSmoothOptimizers/SuiteSparseMatrixCollection.jl/blob/main/CITATION.bib).
## How to install
```julia
pkg> add SuiteSparseMatrixCollection
pkg> test SuiteSparseMatrixCollection
```
## Updating the database
Clone this repository, activate the `utils` environment and run `gen_db.jl` to check if the database needs to be updated.
## Updating `Artifacts.toml`
Clone this repository, activate the `utils` environment and run `gen_artifacts.jl` to check if `Artifacts.toml` needs to be updated.
## Examples
```julia
julia> using SuiteSparseMatrixCollection
julia> # name-based selection can be done with `ssmc_matrices()`
julia> ssmc = ssmc_db()
julia> ssmc_matrices(ssmc, "HB", "bcsstk") # all matrices whose group contains "HB" and name contains "bcsstk"
julia> ssmc_matrices(ssmc, "", "bcsstk") # all matrices whose name contains "bcsstk"
julia> ssmc_matrices(ssmc, "HB", "") # all matrices whose group contains "HB"
julia> # select symmetric positive definite matrices with ≤ 100 rows and columns
julia> tiny = ssmc[(ssmc.numerical_symmetry .== 1) .& (ssmc.positive_definite.== true) .&
                   (ssmc.real .== true) .& (ssmc.nrows .≤ 100), :]
julia> # fetch the selected matrices in MatrixMarket format
julia> paths = fetch_ssmc(tiny, format="MM") # matrices are downloaded in paths
julia> downloaded_matrices = installed_ssmc() # name of all downloaded matrices
julia> delete_ssmc("HB", "bcsstk02") # delete the matrix "bcsstk02" of group "HB"
julia> delete_all_ssmc() # delete all matrices from the SuiteSparseMatrixCollection
```
Matrices are available in formats:
* `"RB"`: the [Rutherford-Boeing format](https://www.cise.ufl.edu/research/sparse/matrices/DOC/rb.pdf);
* `"MM"`: the [MatrixMarket format](http://math.nist.gov/MatrixMarket/formats.html#MMformat).
Use `DataFrames` syntax to further examine a list of selected matrices:
```julia
julia> tiny[!, [:name, :nrows, :ncols, :positive_definite, :lower_bandwidth]]
12×5 DataFrame
│ Row │ name          │ nrows │ ncols │ positive_definite │ lower_bandwidth │
│     │ String        │ Int64 │ Int64 │ Bool              │ Int64           │
├─────┼───────────────┼───────┼───────┼───────────────────┼─────────────────┤
│ 1   │ bcsstk01      │ 48    │ 48    │ 1                 │ 35              │
│ 2   │ bcsstk02      │ 66    │ 66    │ 1                 │ 65              │
│ 3   │ bcsstm02      │ 66    │ 66    │ 1                 │ 0               │
│ 4   │ nos4          │ 100   │ 100   │ 1                 │ 13              │
│ 5   │ ex5           │ 27    │ 27    │ 1                 │ 20              │
│ 6   │ mesh1e1       │ 48    │ 48    │ 1                 │ 47              │
│ 7   │ mesh1em1      │ 48    │ 48    │ 1                 │ 47              │
│ 8   │ mesh1em6      │ 48    │ 48    │ 1                 │ 47              │
│ 9   │ LF10          │ 18    │ 18    │ 1                 │ 3               │
│ 10  │ LFAT5         │ 14    │ 14    │ 1                 │ 5               │
│ 11  │ Trefethen_20b │ 19    │ 19    │ 1                 │ 16              │
│ 12  │ Trefethen_20  │ 20    │ 20    │ 1                 │ 16              │
```
Matrices in Rutherford-Boeing format can be opened with [`HarwellRutherfordBoeing.jl`](https://github.com/JuliaSparse/HarwellRutherfordBoeing.jl):
```julia
pkg> add HarwellRutherfordBoeing
julia> using HarwellRutherfordBoeing
julia> matrix = ssmc[ssmc.name .== "bcsstk01", :]
1×30 DataFrame. Omitted printing of 17 columns
│ Row │ group  │ nnzdiag │ nrows │ numerical_symmetry │ amd_vnz │ binary │ structural_rank │ is_nd │ is_graph │ RB_type │ lower_bandwidth │ explicit_zeros │ amd_flops │
│     │ String │ Int64   │ Int64 │ Float64            │ Int64   │ Bool   │ Int64           │ Bool  │ Bool     │ String  │ Int64           │ Int64          │ Float64   │
├─────┼────────┼─────────┼───────┼────────────────────┼─────────┼────────┼─────────────────┼───────┼──────────┼─────────┼─────────────────┼────────────────┼───────────┤
│ 1   │ HB     │ 48      │ 48    │ 1.0                │ 651     │ 0      │ 48              │ 1     │ 0        │ rsa     │ 35              │ 0              │ 6009.0    │
julia> path = fetch_ssmc(matrix, format="RB")
1-element Array{String,1}:
"/Users/dpo/dev/JSO/SuiteSparseMatrixCollection.jl/src/../data/RB/HB/bcsstk01"
julia> A = RutherfordBoeingData(joinpath(path[1], "$(matrix.name[1]).rb"))
Rutherford-Boeing data 23 of type rsa
48 rows, 48 cols, 224 nonzeros
```
Matrices in MM format can be opened with [`MatrixMarket.jl`](https://github.com/JuliaSparse/MatrixMarket.jl).
| SuiteSparseMatrixCollection | https://github.com/JuliaSmoothOptimizers/SuiteSparseMatrixCollection.jl.git |
|
[
"MPL-2.0"
] | 0.5.6 | 576c5936c2017dc614ae8001efd872d50909e041 | docs | 1414 | # [SuiteSparseMatrixCollection.jl documentation](@id Home)
A straightforward interface to the [SuiteSparse Matrix Collection](https://sparse.tamu.edu/).
## How to install
```julia
pkg> add SuiteSparseMatrixCollection
pkg> test SuiteSparseMatrixCollection
```
## Examples
```julia
julia> using SuiteSparseMatrixCollection
julia> # name-based selection can be done with `ssmc_matrices()`
julia> ssmc = ssmc_db() # the database is named ssmc
julia> ssmc_matrices(ssmc, "HB", "bcsstk") # all matrices whose group contains "HB" and name contains "bcsstk"
julia> ssmc_matrices(ssmc, "", "bcsstk") # all matrices whose name contains "bcsstk"
julia> ssmc_matrices(ssmc, "HB", "") # all matrices whose group contains "HB"
julia> # select symmetric positive definite matrices with ≤ 100 rows and columns
julia> tiny = ssmc[(ssmc.numerical_symmetry .== 1) .& (ssmc.positive_definite.== true) .&
                   (ssmc.real .== true) .& (ssmc.nrows .≤ 100), :]
julia> # fetch the selected matrices in MatrixMarket format
julia> paths = fetch_ssmc(tiny, format="MM") # matrices are downloaded in paths
julia> downloaded_matrices = installed_ssmc() # name of all downloaded matrices
julia> delete_ssmc("HB", "bcsstk02") # delete the matrix "bcsstk02" of group "HB"
julia> delete_all_ssmc() # delete all matrices from the SuiteSparseMatrixCollection
```
| SuiteSparseMatrixCollection | https://github.com/JuliaSmoothOptimizers/SuiteSparseMatrixCollection.jl.git |
|
[
"MPL-2.0"
] | 0.5.6 | 576c5936c2017dc614ae8001efd872d50909e041 | docs | 176 | # Reference
## Contents
```@contents
Pages = ["reference.md"]
```
## Index
```@index
Pages = ["reference.md"]
```
```@autodocs
Modules = [SuiteSparseMatrixCollection]
```
| SuiteSparseMatrixCollection | https://github.com/JuliaSmoothOptimizers/SuiteSparseMatrixCollection.jl.git |
|
[
"MIT"
] | 0.7.0 | bc5b7609e9f4583f303a0ab2a7016ea318464da0 | code | 2433 | module GraphIODOTExt

# Package extension adding DOT (Graphviz) read/write support to GraphIO.jl,
# backed by ParserCombinator's DOT parser.

using Graphs
import Graphs: loadgraph, loadgraphs, savegraph

@static if isdefined(Base, :get_extension)
    using GraphIO
    using ParserCombinator
    import GraphIO.DOT.DOTFormat
else # not required for julia >= v1.9
    using ..GraphIO
    using ..ParserCombinator
    import ..GraphIO.DOT.DOTFormat
end

"""
    savedot(io, g, gname="")

Write graph `g` with name `gname` to `io` in DOT format.
Return 1 (the number of graphs written).
"""
function savedot(io::IO, g::AbstractGraph, gname::String="")
    isdir = is_directed(g)
    println(io, (isdir ? "digraph " : "graph ") * gname * " {")
    # Emit every vertex id so isolated vertices survive a round trip.
    for i in vertices(g)
        println(io, "\t" * string(i))
    end
    if isdir
        # Directed: group all out-neighbors of each vertex on one line.
        for u in vertices(g)
            out_nbrs = outneighbors(g, u)
            length(out_nbrs) == 0 && continue
            println(io, "\t" * string(u) * " -> {" * join(out_nbrs, ',') * "}")
        end
    else
        # Undirected: one "--" statement per edge.
        for e in edges(g)
            source = string(src(e))
            dest = string(dst(e))
            println(io, "\t" * source * " -- " * dest)
        end
    end
    println(io, "}")
    return 1
end

# Write every (name => graph) pair of `graphs` to `io`; return the count.
function savedot_mult(io::IO, graphs::Dict)
    ng = 0
    for (gname, g) in graphs
        ng += savedot(io, g, gname)
    end
    return ng
end

# Build a Graphs.jl (di)graph from one parsed ParserCombinator DOT graph.
function _dot_read_one_graph(pg::Parsers.DOT.Graph)
    isdir = pg.directed
    nvg = length(Parsers.DOT.nodes(pg))
    # Map DOT node names to consecutive integer vertex ids.
    nodedict = Dict(zip(collect(Parsers.DOT.nodes(pg)), 1:nvg))
    if isdir
        g = DiGraph(nvg)
    else
        g = Graph(nvg)
    end
    for es in Parsers.DOT.edges(pg)
        s = nodedict[es[1]]
        d = nodedict[es[2]]
        add_edge!(g, s, d)
    end
    return g
end

# Name of a parsed DOT graph; anonymous graphs fall back to "digraph"/"graph".
# NOTE(review): the fallback branch returns a Parsers.DOT.StringID while the
# other branch returns its `.id` field -- confirm that both compare as
# intended against the `gname::String` used by the loaders below.
function _name(pg::Parsers.DOT.Graph)
    return if pg.id !== nothing
        pg.id.id
    else
        Parsers.DOT.StringID(pg.directed ? "digraph" : "graph")
    end
end

"""
    loaddot(io, gname)

Parse the DOT document in `io` and return the graph named `gname`.
Raise an error when no graph with that name is present.
"""
function loaddot(io::IO, gname::String)
    p = Parsers.DOT.parse_dot(read(io, String))
    for pg in p
        _name(pg) == gname && return _dot_read_one_graph(pg)
    end
    return error("Graph $gname not found")
end

# Parse every graph in the DOT document; return a name => graph dictionary.
function loaddot_mult(io::IO)
    p = Parsers.DOT.parse_dot(read(io, String))
    graphs = Dict{String,AbstractGraph}()
    for pg in p
        graphs[_name(pg)] = _dot_read_one_graph(pg)
    end
    return graphs
end

# Hook the DOT routines into the generic Graphs.jl load/save API.
loadgraph(io::IO, gname::String, ::DOTFormat) = loaddot(io, gname)
loadgraphs(io::IO, ::DOTFormat) = loaddot_mult(io)
savegraph(io::IO, g::AbstractGraph, gname::String, ::DOTFormat) = savedot(io, g, gname)
savegraph(io::IO, d::Dict, ::DOTFormat) = savedot_mult(io, d)

end
| GraphIO | https://github.com/JuliaGraphs/GraphIO.jl.git |
|
[
"MIT"
] | 0.7.0 | bc5b7609e9f4583f303a0ab2a7016ea318464da0 | code | 1585 | module GraphIOGEXFExt

# Package extension adding GEXF output support to GraphIO.jl (write only).

using Graphs
import Graphs: loadgraph, loadgraphs, savegraph, AbstractGraph

@static if isdefined(Base, :get_extension)
    using GraphIO
    using EzXML
    import GraphIO.GEXF.GEXFFormat
else # not required for julia >= v1.9
    using ..GraphIO
    using ..EzXML
    import ..GraphIO.GEXF.GEXFFormat
end

"""
    savegexf(io, g, gname)

Serialize graph `g` to the IO stream `io` in the
[Gexf](http://gexf.net/format/) format, storing `gname` as the graph's
description. Return 1 (number of graphs written).
"""
function savegexf(io::IO, g::AbstractGraph, gname::String)
    doc = XMLDocument()
    root = setroot!(doc, ElementNode("gexf"))
    root["xmlns"] = "http://www.gexf.net/1.2draft"
    root["version"] = "1.2"
    root["xmlns:xsi"] = "http://www.w3.org/2001/XMLSchema-instance"
    root["xsi:schemaLocation"] = "http://www.gexf.net/1.2draft/gexf.xsd"

    # The graph name goes into the document's <meta> description.
    metaelem = addelement!(root, "meta")
    addelement!(metaelem, "description", gname)

    graphelem = addelement!(root, "graph")
    graphelem["defaultedgetype"] = is_directed(g) ? "directed" : "undirected"

    # GEXF node/edge ids are zero-based.
    nodeselem = addelement!(graphelem, "nodes")
    for v in 1:nv(g)
        nodeelem = addelement!(nodeselem, "node")
        nodeelem["id"] = string(v - 1)
    end

    edgeselem = addelement!(graphelem, "edges")
    for (k, e) in enumerate(edges(g))
        edgeelem = addelement!(edgeselem, "edge")
        edgeelem["id"] = string(k - 1)
        edgeelem["source"] = string(src(e) - 1)
        edgeelem["target"] = string(dst(e) - 1)
    end

    prettyprint(io, doc)
    return 1
end

savegraph(io::IO, g::AbstractGraph, gname::String, ::GEXFFormat) = savegexf(io, g, gname)

end
| GraphIO | https://github.com/JuliaGraphs/GraphIO.jl.git |
|
[
"MIT"
] | 0.7.0 | bc5b7609e9f4583f303a0ab2a7016ea318464da0 | code | 2785 | module GraphIOGMLExt

# Package extension adding GML read/write support to GraphIO.jl, backed by
# ParserCombinator's GML parser.

using Graphs
import Graphs: loadgraph, loadgraphs, savegraph

@static if isdefined(Base, :get_extension)
    using GraphIO
    using ParserCombinator
    import GraphIO.GML.GMLFormat
else # not required for julia >= v1.9
    using ..GraphIO
    using ..ParserCombinator
    import ..GraphIO.GML.GMLFormat
end

# Build a (di)graph from one parsed GML graph dictionary `gs`.
function _gml_read_one_graph(gs, dir)
    nodes = [x[:id] for x in gs[:node]]
    if dir
        g = DiGraph(length(nodes))
    else
        g = Graph(length(nodes))
    end
    # GML node ids are arbitrary integers; remap them to 1:n.
    mapping = Dict{Int,Int}()
    for (i, n) in enumerate(nodes)
        mapping[n] = i
    end
    sds = [(Int(x[:source]), Int(x[:target])) for x in gs[:edge]]
    for (s, d) in (sds)
        add_edge!(g, mapping[s], mapping[d])
    end
    return g
end

"""
    loadgml(io, gname)

Return the graph named `gname` from the GML document in `io`.
Graphs without a `label` default to the name "digraph"/"graph"
depending on their `directed` flag.
"""
function loadgml(io::IO, gname::String)
    p = Parsers.GML.parse_dict(read(io, String))
    for gs in p[:graph]
        dir = Bool(get(gs, :directed, 0))
        graphname = get(gs, :label, dir ? "digraph" : "graph")
        (gname == graphname) && return _gml_read_one_graph(gs, dir)
    end
    return error("Graph $gname not found")
end

# Parse every graph in the GML document; return a name => graph dictionary.
function loadgml_mult(io::IO)
    p = Parsers.GML.parse_dict(read(io, String))
    graphs = Dict{String,AbstractGraph}()
    for gs in p[:graph]
        dir = Bool(get(gs, :directed, 0))
        graphname = get(gs, :label, dir ? "digraph" : "graph")
        graphs[graphname] = _gml_read_one_graph(gs, dir)
    end
    return graphs
end

"""
    savegml(io, g, gname="")

Write a graph `g` with name `gname` to an IO stream `io` in the
[GML](https://en.wikipedia.org/wiki/Graph_Modelling_Language) format.
Return 1 (number of graphs written).
"""
function savegml(io::IO, g::AbstractGraph, gname::String="")
    println(io, "graph")
    println(io, "[")
    # The label is only emitted for non-empty names.
    length(gname) > 0 && println(io, "label \"$gname\"")
    is_directed(g) && println(io, "directed 1")
    for i in 1:nv(g)
        println(io, "\tnode")
        println(io, "\t[")
        println(io, "\t\tid $i")
        println(io, "\t]")
    end
    for e in edges(g)
        s, t = Tuple(e)
        println(io, "\tedge")
        println(io, "\t[")
        println(io, "\t\tsource $s")
        println(io, "\t\ttarget $t")
        println(io, "\t]")
    end
    println(io, "]")
    return 1
end

"""
    savegml_mult(io, graphs)

Write a dictionary of (name => graph) to an IO stream `io`.
Return the number of graphs written.
"""
function savegml_mult(io::IO, graphs::Dict)
    ng = 0
    for (gname, g) in graphs
        ng += savegml(io, g, gname)
    end
    return ng
end

# Hook the GML routines into the generic Graphs.jl load/save API.
loadgraph(io::IO, gname::String, ::GMLFormat) = loadgml(io, gname)
loadgraphs(io::IO, ::GMLFormat) = loadgml_mult(io)
savegraph(io::IO, g::AbstractGraph, gname::String, ::GMLFormat) = savegml(io, g, gname)
savegraph(io::IO, d::Dict, ::GMLFormat) = savegml_mult(io, d)

end
| GraphIO | https://github.com/JuliaGraphs/GraphIO.jl.git |
|
[
"MIT"
] | 0.7.0 | bc5b7609e9f4583f303a0ab2a7016ea318464da0 | code | 4919 | module GraphIOGraphMLExt

# Package extension adding GraphML read/write support to GraphIO.jl,
# backed by EzXML's streaming XML reader.

using Graphs
import Graphs: loadgraph, loadgraphs, savegraph

@static if isdefined(Base, :get_extension)
    using GraphIO
    using EzXML
    import GraphIO.GraphML.GraphMLFormat
else # not required for julia >= v1.9
    using ..GraphIO
    using ..EzXML
    import ..GraphIO.GraphML.GraphMLFormat
end

# Consume <node>/<edge> elements from `reader` (positioned inside a <graph>
# element) and build the corresponding (di)graph. Node ids are assigned
# consecutive integers in document order.
function _graphml_read_one_graph(reader::EzXML.StreamReader, isdirected::Bool)
    nodes = Dict{String,Int}()
    xedges = Vector{Edge}()
    nodeid = 1
    for typ in reader
        if typ == EzXML.READER_ELEMENT
            elname = EzXML.nodename(reader)
            if elname == "node"
                nodes[reader["id"]] = nodeid
                nodeid += 1
            elseif elname == "edge"
                # Edges reference the string ids declared by earlier <node>s.
                src = reader["source"]
                tar = reader["target"]
                push!(xedges, Edge(nodes[src], nodes[tar]))
            else
                @warn "Skipping unknown node '$(elname)' - further warnings will be suppressed" maxlog =
                    1 _id = :unknode
            end
        end
    end
    # Edges are buffered so the graph can be sized from the node count first.
    g = (isdirected ? DiGraph : Graph)(length(nodes))
    for edge in xedges
        add_edge!(g, edge)
    end
    return g
end

"""
    loadgraphml(io, gname)

Return the graph named `gname` from the GraphML document in `io`.
Graphs without an `id` attribute default to the name "digraph"/"graph"
depending on their `edgedefault` attribute.
"""
function loadgraphml(io::IO, gname::String)
    reader = EzXML.StreamReader(io)
    for typ in reader
        if typ == EzXML.READER_ELEMENT
            elname = EzXML.nodename(reader)
            if elname == "graphml"
                # ok
            elseif elname == "graph"
                edgedefault = reader["edgedefault"]
                directed = if edgedefault == "directed"
                    true
                elseif edgedefault == "undirected"
                    false
                else
                    error("Unknown value of edgedefault: $edgedefault")
                end
                if haskey(reader, "id")
                    graphname = reader["id"]
                else
                    graphname = directed ? "digraph" : "graph"
                end
                if gname == graphname
                    return _graphml_read_one_graph(reader, directed)
                end
            elseif elname == "node" || elname == "edge"
                # ok
            else
                @warn "Skipping unknown XML element '$(elname)' - further warnings will be suppressed" maxlog =
                    1 _id = :unkel
            end
        end
    end
    return error("Graph $gname not found")
end

# Parse every <graph> in the GraphML document; return name => graph.
# Mirrors `loadgraphml` except that all graphs are collected.
function loadgraphml_mult(io::IO)
    reader = EzXML.StreamReader(io)
    graphs = Dict{String,AbstractGraph}()
    for typ in reader
        if typ == EzXML.READER_ELEMENT
            elname = EzXML.nodename(reader)
            if elname == "graphml"
                # ok
            elseif elname == "graph"
                edgedefault = reader["edgedefault"]
                directed = if edgedefault == "directed"
                    true
                elseif edgedefault == "undirected"
                    false
                else
                    error("Unknown value of edgedefault: $edgedefault")
                end
                if haskey(reader, "id")
                    graphname = reader["id"]
                else
                    graphname = directed ? "digraph" : "graph"
                end
                graphs[graphname] = _graphml_read_one_graph(reader, directed)
            else
                @warn "Skipping unknown XML element '$(elname)' - further warnings will be suppressed" maxlog =
                    1 _id = :unkelmult
            end
        end
    end
    return graphs
end

# Serialize every (name => graph) pair as one <graph> element of a single
# GraphML document written to `io`; return the number of graphs written.
function savegraphml_mult(io::IO, graphs::Dict)
    xdoc = XMLDocument()
    xroot = setroot!(xdoc, ElementNode("graphml"))
    xroot["xmlns"] = "http://graphml.graphdrawing.org/xmlns"
    xroot["xmlns:xsi"] = "http://www.w3.org/2001/XMLSchema-instance"
    xroot["xsi:schemaLocation"] = "http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"
    for (gname, g) in graphs
        xg = addelement!(xroot, "graph")
        xg["id"] = gname
        xg["edgedefault"] = is_directed(g) ? "directed" : "undirected"
        # Node and edge ids are zero-based and prefixed "n"/"e".
        for i in 1:nv(g)
            xv = addelement!(xg, "node")
            xv["id"] = "n$(i-1)"
        end
        m = 0
        for e in edges(g)
            xe = addelement!(xg, "edge")
            xe["id"] = "e$m"
            xe["source"] = "n$(src(e)-1)"
            xe["target"] = "n$(dst(e)-1)"
            m += 1
        end
    end
    prettyprint(io, xdoc)
    return length(graphs)
end

# Single-graph convenience wrapper around `savegraphml_mult`.
function savegraphml(io::IO, g::AbstractGraph, gname::String)
    return savegraphml_mult(io, Dict(gname => g))
end

# Hook the GraphML routines into the generic Graphs.jl load/save API.
loadgraph(io::IO, gname::String, ::GraphMLFormat) = loadgraphml(io, gname)
loadgraphs(io::IO, ::GraphMLFormat) = loadgraphml_mult(io)
function savegraph(io::IO, g::AbstractGraph, gname::String, ::GraphMLFormat)
    return savegraphml(io, g, gname)
end
savegraph(io::IO, d::Dict, ::GraphMLFormat) = savegraphml_mult(io, d)

end
| GraphIO | https://github.com/JuliaGraphs/GraphIO.jl.git |
|
[
"MIT"
] | 0.7.0 | bc5b7609e9f4583f303a0ab2a7016ea318464da0 | code | 1578 | module GraphIOLGCompressedExt

# Package extension adding gzip-compressed LG-format support to GraphIO.jl.
# Writing wraps the file in a CodecZlib compressor stream; reading delegates
# to LGFormat directly.

using Graphs
import Graphs: loadgraph, loadgraphs, savegraph, LGFormat

@static if isdefined(Base, :get_extension)
    using GraphIO
    using CodecZlib
    import GraphIO.LGCompressed.LGCompressedFormat
else # not required for julia >= v1.9
    using ..GraphIO
    using ..CodecZlib
    import ..GraphIO.LGCompressed.LGCompressedFormat
end

# Save `g` under name `gname` to file `fn`, gzip-compressing the LG stream.
function savegraph(
    fn::AbstractString, g::AbstractGraph, gname::AbstractString, format::LGCompressedFormat
)
    io = open(fn, "w")
    try
        # Rebind `io` so `finally` closes the compressor stream, which in
        # turn flushes and closes the underlying file.
        io = GzipCompressorStream(io)
        return savegraph(io, g, gname, LGFormat())
    catch
        rethrow()
    finally
        close(io)
    end
end

# Unnamed variant: store the graph under the default name "graph".
function savegraph(fn::AbstractString, g::AbstractGraph, format::LGCompressedFormat)
    return savegraph(fn, g, "graph", format)
end

# Save a whole (name => graph) dictionary to one compressed LG file.
function savegraph(
    fn::AbstractString, d::Dict{T,U}, format::LGCompressedFormat
) where {T<:AbstractString} where {U<:AbstractGraph}
    io = open(fn, "w")
    try
        io = GzipCompressorStream(io)
        return savegraph(io, d, LGFormat())
    catch
        rethrow()
    finally
        close(io)
    end
end

# savegraph(fn::AbstractString, d::Dict; compress) = savegraph(fn, d, LGCompressedFormat())

# Loading delegates straight to LGFormat.
# NOTE(review): presumably Graphs' LG reader transparently handles gzipped
# files when given a filename -- confirm against Graphs.jl's persistence code.
function loadgraph(fn::AbstractString, gname::AbstractString, format::LGCompressedFormat)
    return loadgraph(fn, gname, LGFormat())
end

function loadgraph(fn::AbstractString, format::LGCompressedFormat)
    return loadgraph(fn, "graph", LGFormat())
end

loadgraphs(fn::AbstractString, format::LGCompressedFormat) = loadgraphs(fn, LGFormat())

end
| GraphIO | https://github.com/JuliaGraphs/GraphIO.jl.git |
|
[
"MIT"
] | 0.7.0 | bc5b7609e9f4583f303a0ab2a7016ea318464da0 | code | 922 | module GraphIO

# Root module: always loads the lightweight per-format tag types below; the
# heavy readers/writers live in package extensions loaded on demand.

# On Julia < 1.9 (no package extensions) fall back to Requires.jl.
@static if !isdefined(Base, :get_extension)
    using Requires
end

@static if !isdefined(Base, :get_extension)
    function __init__()
        # Load each optional back-end once its heavy dependency is imported
        # by the user (UUIDs identify the trigger packages).
        @require CodecZlib = "944b1d66-785c-5afd-91f1-9de20f533193" begin
            include("../ext/GraphIOLGCompressedExt.jl")
        end
        @require EzXML = "8f5d6c58-4d21-5cfd-889c-e3ad7ee6a615" begin
            include("../ext/GraphIOGEXFExt.jl")
            include("../ext/GraphIOGraphMLExt.jl")
        end
        @require ParserCombinator = "fae87a5f-d1ad-5cf0-8f61-c941e1580b46" begin
            include("../ext/GraphIODOTExt.jl")
            include("../ext/GraphIOGMLExt.jl")
        end
    end
end

# Format definitions (and self-contained formats with no extra deps).
include("CDF/Cdf.jl")
include("DOT/Dot.jl")
include("Edgelist/Edgelist.jl")
include("GEXF/Gexf.jl")
include("GML/Gml.jl")
include("GraphML/GraphML.jl")
include("Graph6/Graph6.jl")
include("LGCompressed/LGCompressed.jl")
include("NET/Net.jl")

end
| GraphIO | https://github.com/JuliaGraphs/GraphIO.jl.git |
|
[
"MIT"
] | 0.7.0 | bc5b7609e9f4583f303a0ab2a7016ea318464da0 | code | 1673 | module CDF

# loads a graph from a IEEE CDF file.
# http://www2.ee.washington.edu/research/pstca/formats/cdf.txt
# http://www2.ee.washington.edu/research/pstca/pf30/ieee30cdf.txt

using Graphs
using Graphs: AbstractGraphFormat
import Graphs: loadgraph, loadgraphs, savegraph

export CDFFormat

struct CDFFormat <: AbstractGraphFormat end

# Parse an IEEE CDF power-flow file: vertices come from the BUS DATA section,
# edges from the BRANCH DATA section. Returns an undirected graph.
function _loadcdf(io::IO)
    srcs = Vector{Int}()
    dsts = Vector{Int}()
    vertices = Vector{Int}()
    inbusdata = false
    inbranchdata = false
    while !eof(io)
        line = strip(chomp(readline(io)))
        if inbusdata
            # A line containing "-999" terminates the bus section.
            if occursin("-999", line)
                inbusdata = false
            else
                v = parse(Int, split(line)[1])  # first field is the bus id
                push!(vertices, v)
            end
        elseif inbranchdata
            if occursin("-999", line)
                inbranchdata = false
            else
                # Translate bus ids into 1-based vertex indices.
                # NOTE(review): an unknown bus id yields vertex 0 here, which
                # the add_edge! call below would silently reject -- confirm
                # this fallback is intended.
                (src_s, dst_s) = split(line)[1:2]
                src = something(findfirst(isequal(parse(Int, src_s)), vertices), 0)
                dst = something(findfirst(isequal(parse(Int, dst_s)), vertices), 0)
                push!(srcs, src)
                push!(dsts, dst)
            end
        else
            # Look for the headers that open the bus/branch sections.
            inbusdata = startswith(line, "BUS DATA FOLLOWS")
            inbranchdata = startswith(line, "BRANCH DATA FOLLOWS")
        end
    end
    n_v = length(vertices)
    g = Graphs.Graph(n_v)
    for p in zip(srcs, dsts)
        add_edge!(g, p)
    end
    return g
end

# The graph name is ignored: a CDF file holds exactly one network.
loadcdf(io::IO, gname::String) = _loadcdf(io)

loadgraph(io::IO, gname::String, ::CDFFormat) = loadcdf(io, gname)
loadgraphs(io::IO, ::CDFFormat) = Dict("graph" => loadcdf(io, "graph"))

end # module
| GraphIO | https://github.com/JuliaGraphs/GraphIO.jl.git |
|
[
"MIT"
] | 0.7.0 | bc5b7609e9f4583f303a0ab2a7016ea318464da0 | code | 123 | module DOT

# Lightweight tag type for the DOT format; the actual reader/writer lives in
# the GraphIODOTExt package extension (requires ParserCombinator).

import Graphs: AbstractGraphFormat

export DOTFormat

struct DOTFormat <: AbstractGraphFormat end

end #module
| GraphIO | https://github.com/JuliaGraphs/GraphIO.jl.git |
|
[
"MIT"
] | 0.7.0 | bc5b7609e9f4583f303a0ab2a7016ea318464da0 | code | 1665 | module EdgeList

# Reads/writes a simple edge-list format: one edge per line, endpoints
# separated by commas or whitespace. Only the first two tokens on each line
# are used, and loading always produces a directed graph.

using DelimitedFiles: writedlm
using Graphs
using Graphs: AbstractGraphFormat
import Graphs: loadgraph, loadgraphs, savegraph

export EdgeListFormat

struct EdgeListFormat <: AbstractGraphFormat end

function loadedgelist(io::IO, gname::String)
    sources = Vector{String}()
    targets = Vector{String}()
    edge_re = r"(\w+)[\s,]+(\w+)"
    for raw in eachline(io)
        line = strip(raw)
        # Skip blank lines and '#' comments.
        (line == "" || startswith(line, "#")) && continue
        s, t = match(edge_re, line).captures
        push!(sources, s)
        push!(targets, t)
    end
    # Number vertices by first appearance among all endpoint labels.
    labels = unique(vcat(sources, targets))
    index = Dict{String,Int}()
    for (i, label) in enumerate(labels)
        index[label] = i
    end
    g = Graphs.DiGraph(length(labels))
    for (s, t) in zip(sources, targets)
        add_edge!(g, index[s], index[t])
    end
    return g
end

function saveedgelist(io::IO, g::Graphs.AbstractGraph, gname::String)
    # One "src,dst" row per edge.
    writedlm(io, ((src(e), dst(e)) for e in Graphs.edges(g)), ',')
    return 1
end

loadgraph(io::IO, gname::String, ::EdgeListFormat) = loadedgelist(io, gname)
loadgraphs(io::IO, ::EdgeListFormat) = Dict("graph" => loadedgelist(io, "graph"))
function savegraph(io::IO, g::AbstractGraph, gname::String, ::EdgeListFormat)
    return saveedgelist(io, g, gname)
end

include("IntEdgeList.jl")

end
| GraphIO | https://github.com/JuliaGraphs/GraphIO.jl.git |
|
[
"MIT"
] | 0.7.0 | bc5b7609e9f4583f303a0ab2a7016ea318464da0 | code | 1523 | export IntEdgeListFormat

# Edge-list variant whose endpoints are integers. `offset` is subtracted
# from every vertex id read from the file (e.g. `offset=-1` shifts a
# 0-based file to the 1-based ids used by Graphs.jl).
struct IntEdgeListFormat <: AbstractGraphFormat
    offset::Int64
end
IntEdgeListFormat(; offset=0) = IntEdgeListFormat(offset)

# Parse "src dst" / "src,dst" integer pairs and build a DiGraph directly from
# forward/backward adjacency lists (avoids repeated add_edge! calls).
function loadintedgelist(io::IO, gname::String, offset::Int64)
    elist = Vector{Tuple{Int64,Int64}}()  # NOTE(review): appears unused
    nvg = 0
    neg = 0
    fadjlist = Vector{Vector{Int64}}()
    for x in eachline(io)
        # Scan the source id up to the first separator character.
        i = 1
        while x[i] != ' ' && x[i] != ','
            i += 1
        end
        s = parse(Int64, x[1:(i - 1)])
        # Skip the run of separator characters.
        while x[i] == ' ' || x[i] == ','
            i += 1
        end
        ii = i
        # Scan the destination id up to a space or end of line.
        while i <= length(x) && x[i] != ' '
            i += 1
        end
        d = parse(Int64, x[ii:(i - 1)])
        s = s - offset
        d = d - offset
        # Grow the adjacency lists lazily to the largest vertex id seen.
        if nvg < max(s, d)
            nvg = max(s, d)
            append!(fadjlist, [Vector{Int64}() for _ in 1:(nvg - length(fadjlist))])
        end
        push!(fadjlist[s], d)
        neg += 1
    end
    sort!.(fadjlist)
    # Derive backward adjacency from the sorted forward lists.
    badjlist = [Vector{Int64}() for _ in 1:nvg]
    for u in 1:nvg
        for v in fadjlist[u]
            push!(badjlist[v], u)
        end
    end
    return Graphs.DiGraph(neg, fadjlist, badjlist)
end

function loadgraph(io::IO, gname::String, fmt::IntEdgeListFormat)
    return loadintedgelist(io, gname, fmt.offset)
end
function loadgraphs(io::IO, fmt::IntEdgeListFormat)
    return Dict("graph" => loadintedgelist(io, "graph", fmt.offset))
end
# Writing reuses the generic string edge-list writer (no offset applied).
function savegraph(io::IO, g::AbstractGraph, gname::String, ::IntEdgeListFormat)
    return saveedgelist(io, g, gname)
end
| GraphIO | https://github.com/JuliaGraphs/GraphIO.jl.git |
|
[
"MIT"
] | 0.7.0 | bc5b7609e9f4583f303a0ab2a7016ea318464da0 | code | 153 | module GEXF

# Lightweight tag type for the GEXF format; the writer lives in the
# GraphIOGEXFExt package extension (requires EzXML). Reading is not
# implemented.

import Graphs: AbstractGraphFormat

export GEXFFormat

# TODO: implement readgexf
struct GEXFFormat <: AbstractGraphFormat end

end #module
| GraphIO | https://github.com/JuliaGraphs/GraphIO.jl.git |
|
[
"MIT"
] | 0.7.0 | bc5b7609e9f4583f303a0ab2a7016ea318464da0 | code | 124 | module GML

# Lightweight tag type for the GML format; the reader/writer lives in the
# GraphIOGMLExt package extension (requires ParserCombinator).

import Graphs: AbstractGraphFormat

export GMLFormat

struct GMLFormat <: AbstractGraphFormat end

end # module
| GraphIO | https://github.com/JuliaGraphs/GraphIO.jl.git |
|
[
"MIT"
] | 0.7.0 | bc5b7609e9f4583f303a0ab2a7016ea318464da0 | code | 4191 | module Graph6
using SimpleTraits
using Graphs
using Graphs: AbstractGraphFormat
import Graphs: loadgraph, loadgraphs, savegraph
export Graph6Format
struct Graph6Format <: AbstractGraphFormat end
function _bv2int(x::BitVector)
@assert(length(x) <= 8 * sizeof(Int))
acc = 0
for i in 1:length(x)
acc = acc << 1 + x[i]
end
return acc
end
function _int2bv(n::Int, k::Int)
bitstr = lstrip(bitstring(n), '0')
l = length(bitstr)
padding = k - l
bv = falses(k)
for i in 1:l
bv[padding + i] = (bitstr[i] == '1')
end
return bv
end
function _g6_R(_x::BitVector)::Vector{UInt8}
k = length(_x)
padding = cld(k, 6) * 6 - k
x = vcat(_x, falses(padding))
nbytes = div(length(x), 6)
bytevec = Vector{UInt8}(undef, nbytes) # uninitialized data!
for i in 1:nbytes
xslice = x[((i - 1) * 6 + 1):(i * 6)]
intslice = 0
for bit in xslice
intslice = (intslice << 1) + bit
end
intslice += 63
bytevec[i] = intslice
end
return UInt8.(bytevec)
end
_g6_R(n::Int, k::Int) = _g6_R(_int2bv(n, k))
function _g6_Rp(bytevec::Vector{UInt8})
nbytes = length(bytevec)
x = BitVector()
for byte in bytevec
bits = _int2bv(byte - 63, 6)
x = vcat(x, bits)
end
return x
end
function _g6_N(x::Integer)::Vector{UInt8}
if (x < 0) || (x > 68719476735)
error("x must satisfy 0 <= x <= 68719476735")
elseif (x <= 62)
nvec = [x + 63]
elseif (x <= 258047)
nvec = vcat([0x7e], _g6_R(x, 18))
else
nvec = vcat([0x7e; 0x7e], _g6_R(x, 36))
end
return UInt8.(nvec)
end
function _g6_Np(N::Vector{UInt8})
if N[1] < 0x7e
return (Int(N[1] - 63), N[2:end])
elseif N[2] < 0x7e
return (_bv2int(_g6_Rp(N[2:4])), N[5:end])
else
return (_bv2int(_g6_Rp(N[3:8])), N[9:end])
end
end
"""
_graphToG6String(g)
Given a graph `g`, create the corresponding Graph6 string.
"""
function _graphToG6String(g::Graphs.Graph)
A = adjacency_matrix(g, Bool)
n = nv(g)
nbits = div(n * (n - 1), 2)
x = BitVector(undef, nbits)
ind = 0
for col in 2:n, row in 1:(col - 1)
ind += 1
x[ind] = A[row, col]
end
return join([">>graph6<<", String(_g6_N(n)), String(_g6_R(x))])
end
# Decode a graph6 string `s` (with or without the ">>graph6<<" header)
# into an undirected Graphs.Graph.
function _g6StringToGraph(s::AbstractString)
    if startswith(s, ">>graph6<<")
        s = s[11:end]  # drop the 10-character ASCII header
    end
    V = Vector{UInt8}(s)
    # N(n) gives the vertex count; the remaining bytes encode the bits
    # of the strict upper triangle of the adjacency matrix.
    (nv, rest) = _g6_Np(V)
    bitvec = _g6_Rp(rest)
    g = Graphs.Graph(nv)
    n = 0
    # Bits run column by column over the upper triangle.
    for i in 2:nv, j in 1:(i - 1)
        n += 1
        if bitvec[n]
            add_edge!(g, j, i)
        end
    end
    return g
end
# Read every graph6 line from `io` and return a Dict mapping
# "graph<i>" => graph, where <i> is the 1-based line number.
# NOTE(review): blank lines still consume a name slot (the counter is
# incremented before the emptiness check), so names may have gaps —
# confirm whether this is intended before changing it.
function loadgraph6_mult(io::IO)
    n = 0
    graphdict = Dict{String,Graphs.Graph}()
    while !eof(io)
        n += 1
        line = strip(chomp(readline(io)))
        gname = "graph$n"
        if length(line) > 0
            g = _g6StringToGraph(line)
            graphdict[gname] = g
        end
    end
    return graphdict
end
"""
    loadgraph6(io, gname="graph")

Read a graph from IO stream `io` in the [Graph6](http://users.cecs.anu.edu.au/%7Ebdm/data/formats.txt)
format. Return the graph.

Names follow the `loadgraph6_mult` convention ("graph1", "graph2", ...),
so a `KeyError` is raised if `gname` is not present.
"""
loadgraph6(io::IO, gname::String="graph") = loadgraph6_mult(io)[gname]
"""
    savegraph6(io, g, gname="graph")

Write a graph `g` to IO stream `io` in the [Graph6](http://users.cecs.anu.edu.au/%7Ebdm/data/formats.txt)
format. Return 1 (number of graphs written).
"""
function savegraph6 end
# Restricted to undirected graphs: graph6 stores only the upper triangle
# of the adjacency matrix, so direction cannot be represented.
@traitfn function savegraph6(io::IO, g::::(!Graphs.IsDirected), gname::String="graph")
    str = _graphToG6String(g)
    println(io, str)
    return 1
end
# Write every graph in `graphs` to `io`, one per line, in a stable
# sorted-by-name order. Returns the number of graphs written.
function savegraph6_mult(io::IO, graphs::Dict)
    written = 0
    for name in sort!(collect(keys(graphs)))
        written += savegraph6(io, graphs[name], name)
    end
    return written
end
# Graphs.jl AbstractGraphFormat API glue for Graph6Format.
loadgraph(io::IO, gname::String, ::Graph6Format) = loadgraph6(io, gname)
loadgraphs(io::IO, ::Graph6Format) = loadgraph6_mult(io)
function savegraph(io::IO, g::AbstractGraph, gname::String, ::Graph6Format)
    return savegraph6(io, g, gname)
end
savegraph(io::IO, d::Dict, ::Graph6Format) = savegraph6_mult(io, d)
end # module
| GraphIO | https://github.com/JuliaGraphs/GraphIO.jl.git |
|
[
"MIT"
] | 0.7.0 | bc5b7609e9f4583f303a0ab2a7016ea318464da0 | code | 136 | module GraphML
import Graphs: AbstractGraphFormat
export GraphMLFormat
# Tag type selecting the GraphML XML format in loadgraph/savegraph.
# The actual reader/writer is supplied by a package extension (needs EzXML).
struct GraphMLFormat <: AbstractGraphFormat end
end # module
| GraphIO | https://github.com/JuliaGraphs/GraphIO.jl.git |
|
[
"MIT"
] | 0.7.0 | bc5b7609e9f4583f303a0ab2a7016ea318464da0 | code | 151 | module LGCompressed
import Graphs: AbstractGraphFormat
export LGCompressedFormat
# Tag type selecting the compressed Graphs-native format in
# loadgraph/savegraph; the implementation lives in an extension
# (needs CodecZlib).
struct LGCompressedFormat <: AbstractGraphFormat end
end # module
| GraphIO | https://github.com/JuliaGraphs/GraphIO.jl.git |
|
[
"MIT"
] | 0.7.0 | bc5b7609e9f4583f303a0ab2a7016ea318464da0 | code | 2324 | module NET
using Graphs: Graphs
using Graphs
using Graphs: AbstractGraphFormat
import Graphs: loadgraph, loadgraphs, savegraph
export NETFormat
struct NETFormat <: AbstractGraphFormat end
"""
    savenet(io, g, gname="g")

Write a graph `g` to an IO stream `io` in the [Pajek NET](http://gephi.github.io/users/supported-graph-formats/pajek-net-format/)
format. Return 1 (number of graphs written).
"""
function savenet(io::IO, g::Graphs.AbstractGraph, gname::String="g")
    println(io, "*Vertices $(nv(g))")
    # write edges
    # Pajek uses "*Arcs" for directed edges and "*Edges" for undirected.
    if is_directed(g)
        println(io, "*Arcs")
    else
        println(io, "*Edges")
    end
    for e in Graphs.edges(g)
        println(io, "$(src(e)) $(dst(e))")
    end
    return 1
end
"""
    loadnet(io::IO, gname="graph")

Read a graph from IO stream `io` in the [Pajek NET](http://gephi.github.io/users/supported-graph-formats/pajek-net-format/)
format. Return the graph.

The graph is directed if the first section header found is `*Arcs`,
undirected if it is `*Edges`. `gname` is accepted for API symmetry but
is not used (NET files hold a single graph).
"""
function loadnet(io::IO, gname::String="graph")
    line = readline(io)
    # Skip comment lines ("%...") at the top of the file.
    while startswith(line, "%")
        line = readline(io)
    end
    # First non-comment line is "*Vertices <n>".
    n = parse(Int, match(r"\d+", line).match)
    # Scan forward to the first section header.
    for ioline in eachline(io)
        line = ioline
        (occursin(r"^\*Arcs", line) || occursin(r"^\*Edges", line)) && break
    end
    if occursin(r"^\*Arcs", line)
        g = Graphs.DiGraph(n)
    else
        g = Graphs.Graph(n)
    end
    while occursin(r"^\*Arcs", line)
        # Guard against truncated files: if the "*Arcs" header is the last
        # line, the inner `for` below never runs, `line` never changes, and
        # this loop would previously spin forever.
        eof(io) && break
        for ioline in eachline(io)
            line = ioline
            ms = collect(m.match for m in eachmatch(r"\d+", line; overlap=false))
            # A line with fewer than two numbers ends the section (it may be
            # the next section header, which the outer loop re-examines).
            length(ms) < 2 && break
            add_edge!(g, parse(Int, ms[1]), parse(Int, ms[2]))
        end
    end
    while occursin(r"^\*Edges", line) # add edges in both directions
        eof(io) && break  # same truncation guard as above
        for ioline in eachline(io)
            line = ioline
            ms = collect(m.match for m in eachmatch(r"\d+", line; overlap=false))
            length(ms) < 2 && break
            i1, i2 = parse(Int, ms[1]), parse(Int, ms[2])
            add_edge!(g, i1, i2)
            add_edge!(g, i2, i1)
        end
    end
    return g
end
# Graphs.jl AbstractGraphFormat API glue for NETFormat (one graph per file).
loadgraph(io::IO, gname::String, ::NETFormat) = loadnet(io, gname)
loadgraphs(io::IO, ::NETFormat) = Dict("graph" => loadnet(io, "graph"))
savegraph(io::IO, g::AbstractGraph, gname::String, ::NETFormat) = savenet(io, g, gname)
end # module
| GraphIO | https://github.com/JuliaGraphs/GraphIO.jl.git |
|
[
"MIT"
] | 0.7.0 | bc5b7609e9f4583f303a0ab2a7016ea318464da0 | code | 2407 | # This file contains helper functions for testing the various
# GraphIO formats
using Graphs
# Small undirected fixtures covering complete, path, and wheel topologies.
graphs = Dict{String,Graph}(
    "graph1" => complete_graph(5), "graph2" => path_graph(6), "graph3" => wheel_graph(4)
)
# Directed counterparts of the fixtures above.
digraphs = Dict{String,DiGraph}(
    "digraph1" => complete_digraph(5),
    "digraph2" => path_digraph(6),
    "digraph3" => wheel_digraph(4),
)
# Union of both fixture sets, for formats that support mixed files.
allgraphs = merge(graphs, digraphs)
# Create an empty temporary file and return its path. The caller is
# responsible for deleting the file when done.
function gettempname()
    path, stream = mktemp()
    close(stream)
    return path
end
# Check that graph `g` can be loaded from `fname` in `format`, both via
# loadgraph (by name) and via loadgraphs (whole-file dictionary).
# With `testfail=true`, also check that requesting a missing graph name
# raises an error.
function read_test(
    format::Graphs.AbstractGraphFormat,
    g::Graphs.AbstractGraph,
    gname::String="g",
    fname::AbstractString="";
    testfail=false,
)
    @test loadgraph(fname, gname, format) == g
    if testfail
        @test_throws Union{ArgumentError,ErrorException} loadgraph(
            fname, "badgraphXXX", format
        )
    end
    @test loadgraphs(fname, format)[gname] == g
end
# Load all graphs from `fname` in `format` and check that the resulting
# dictionary matches `d` exactly (same names, same graphs).
function read_test_mult(
    format::Graphs.AbstractGraphFormat, d::Dict{String,G}, fname::AbstractString=""
) where {G<:AbstractGraph}
    loaded = loadgraphs(fname, format)
    @test loaded == d
end
# Save a single graph `g` and check that exactly one graph was written.
# With `remove=false` the output file is kept for debugging; unless
# `silent=true` its location is reported.
function write_test(
    format::Graphs.AbstractGraphFormat,
    g::Graphs.AbstractGraph,
    gname::String="g",
    fname::AbstractString=gettempname();
    remove=true,
    silent=false,
)
    @test savegraph(fname, g, gname, format) == 1
    if remove
        rm(fname)
    elseif !silent
        # `info(...)` was removed in Julia 1.0; use the logging macro.
        @info "graphio/write_test: Left temporary file at: $fname"
    end
end
# Save a dictionary of graphs and check that all of them were written.
# With `remove=false` the output file is kept for debugging; unless
# `silent=true` its location is reported.
function write_test(
    format::Graphs.AbstractGraphFormat,
    d::Dict{String,G},
    fname::AbstractString=gettempname();
    remove=true,
    silent=false,
) where {G<:Graphs.AbstractGraph}
    @test savegraph(fname, d, format) == length(d)
    if remove
        rm(fname)
    elseif !silent
        # `info(...)` was removed in Julia 1.0; use the logging macro.
        @info "graphio/write_test: Left temporary file at: $fname"
    end
end
# Save `g` to `fname`, load it back through both APIs, and check equality.
# With `testfail=true`, also check that a missing graph name errors.
# With `remove=false` the file is kept and its path reported.
function readback_test(
    format::Graphs.AbstractGraphFormat,
    g::Graphs.AbstractGraph,
    gname="graph",
    fname=gettempname();
    remove=true,
    testfail=false,
)
    @test savegraph(fname, g, format) == 1
    @test loadgraphs(fname, format)[gname] == g
    @test loadgraph(fname, gname, format) == g
    if testfail
        @test_throws Union{ArgumentError,ErrorException} loadgraph(
            fname, "badgraphXXX", format
        )
    end
    if remove
        rm(fname)
    else
        # `info(...)` was removed in Julia 1.0; use the logging macro.
        @info "graphio/readback_test: Left temporary file at: $fname"
    end
end
| GraphIO | https://github.com/JuliaGraphs/GraphIO.jl.git |
|
[
"MIT"
] | 0.7.0 | bc5b7609e9f4583f303a0ab2a7016ea318464da0 | code | 805 | using Aqua
using GraphIO
using Graphs
using JuliaFormatter
using Test
# Directory of this file; used to build paths to per-format suites.
testdir = dirname(@__FILE__)
# Each entry names a subdirectory with its own runtests.jl.
modules = [
    "CDF", "Edgelist", "GML", "NET", "DOT", "GEXF", "Graph6", "GraphML", "LGCompressed"
]
# Shared fixtures and assertion helpers (read_test, write_test, ...).
include("graphio.jl")
# write your own tests here
@testset verbose = true "GraphIO" begin
    @testset "Code quality" begin
        # Requires is loaded conditionally, so the stale-deps check must
        # ignore it; project TOML formatting needs Julia >= 1.9.
        Aqua.test_all(GraphIO; stale_deps=false, project_toml_formatting=false)
        Aqua.test_stale_deps(GraphIO; ignore=[:Requires])
        if VERSION >= v"1.9"
            Aqua.test_project_toml_formatting(GraphIO)
        end
    end
    @testset "Code formatting" begin
        @test JuliaFormatter.format(GraphIO; verbose=false, overwrite=false)
    end
    # Run each format's test suite.
    for name in modules
        path = joinpath(testdir, name, "runtests.jl")
        include(path)
    end
end
| GraphIO | https://github.com/JuliaGraphs/GraphIO.jl.git |
|
[
"MIT"
] | 0.7.0 | bc5b7609e9f4583f303a0ab2a7016ea318464da0 | code | 200 | using Test
using GraphIO.CDF
# Reference IEEE 30-bus system: the .jlg file holds the expected graph,
# the .cdf file is the input under test.
@testset "CDF" begin
    g = loadgraph(joinpath(testdir, "testdata", "30bus.jlg"))
    read_test(CDFFormat(), g, "graph", joinpath(testdir, "testdata", "30bus.cdf"))
end
| GraphIO | https://github.com/JuliaGraphs/GraphIO.jl.git |
|
[
"MIT"
] | 0.7.0 | bc5b7609e9f4583f303a0ab2a7016ea318464da0 | code | 1643 | using Test
using ParserCombinator
using GraphIO.DOT
using Graphs.Experimental
@testset "DOT" begin
    # twographs.dot holds an undirected complete graph and a small digraph.
    g = complete_graph(6)
    dg = DiGraph(4)
    for e in [Edge(1, 2), Edge(1, 3), Edge(2, 2), Edge(2, 3), Edge(4, 1), Edge(4, 3)]
        add_edge!(dg, e)
    end
    fname = joinpath(testdir, "testdata", "twographs.dot")
    read_test(DOTFormat(), g, "g1", fname; testfail=true)
    read_test(DOTFormat(), dg, "g2", fname)
    read_test_mult(DOTFormat(), Dict{String,AbstractGraph}("g1" => g, "g2" => dg), fname)
    # tests for multiple graphs
    fname = joinpath(testdir, "testdata", "saved3graphs.dot")
    # connected graph
    g1 = SimpleGraph(5, 10)
    # disconnected graph
    g2 = SimpleGraph(5, 2)
    # directed graph
    dg = SimpleDiGraph(5, 8)
    GraphDict = Dict("g1" => g1, "g2" => g2, "dg" => dg)
    write_test(DOTFormat(), GraphDict, fname; remove=false, silent=true)
    # isomorphism check because currently the parser returns unordered vertices
    @test has_isomorph(loadgraph(fname, "g1", DOTFormat()), g1)
    @test has_isomorph(loadgraph(fname, "g2", DOTFormat()), g2)
    @test has_isomorph(loadgraph(fname, "dg", DOTFormat()), dg)
    rm(fname)
    # tests for single graph
    fname1 = joinpath(testdir, "testdata", "saved1graph.dot")
    write_test(DOTFormat(), g1, "g1", fname1; remove=false, silent=true)
    @test has_isomorph(loadgraph(fname1, "g1", DOTFormat()), g1)
    fname2 = joinpath(testdir, "testdata", "saved1digraph.dot")
    write_test(DOTFormat(), dg, "dg", fname2; remove=false, silent=true)
    @test has_isomorph(loadgraph(fname2, "dg", DOTFormat()), dg)
    rm(fname1)
    rm(fname2)
end
| GraphIO | https://github.com/JuliaGraphs/GraphIO.jl.git |
|
[
"MIT"
] | 0.7.0 | bc5b7609e9f4583f303a0ab2a7016ea318464da0 | code | 234 | using Test
using GraphIO.EdgeList
using GraphIO.EdgeList: IntEdgeListFormat
# Edge lists carry no name/metadata, so only the directed fixtures are
# round-tripped here (both the generic and the integer-typed variants).
@testset "EdgeList" begin
    for g in values(digraphs)
        readback_test(EdgeListFormat(), g)
        readback_test(IntEdgeListFormat(), g)
    end
end
| GraphIO | https://github.com/JuliaGraphs/GraphIO.jl.git |
|
[
"MIT"
] | 0.7.0 | bc5b7609e9f4583f303a0ab2a7016ea318464da0 | code | 144 | using Test
using EzXML
using GraphIO.GEXF
# GEXF currently supports writing only, so just exercise savegraph.
@testset "GEXF" begin
    for g in values(allgraphs)
        write_test(GEXFFormat(), g)
    end
end
| GraphIO | https://github.com/JuliaGraphs/GraphIO.jl.git |
|
[
"MIT"
] | 0.7.0 | bc5b7609e9f4583f303a0ab2a7016ea318464da0 | code | 209 | using Test
using ParserCombinator
using GraphIO.GML
# Round-trip every fixture through GML, then save all graphs to one file.
@testset "GML" begin
    for g in values(allgraphs)
        readback_test(GMLFormat(), g; testfail=true)
    end
    write_test(GMLFormat(), allgraphs)
end
| GraphIO | https://github.com/JuliaGraphs/GraphIO.jl.git |
|
[
"MIT"
] | 0.7.0 | bc5b7609e9f4583f303a0ab2a7016ea318464da0 | code | 567 | using Test
using GraphIO.Graph6
@testset "Graph6" begin
    # Known N(x) encodings taken from the graph6 format specification.
    n1 = (30, UInt8.([93]))
    n2 = (12345, UInt8.([126; 66; 63; 120]))
    n3 = (460175067, UInt8.([126; 126; 63; 90; 90; 90; 90; 90]))
    ns = [n1; n2; n3]
    for n in ns
        @test Graph6._g6_N(n[1]) == n[2]
        @test Graph6._g6_Np(n[2])[1] == n[1]
    end
    # graph6 is undirected-only; the loader names graphs "graph1", "graph2", ...
    for g in values(graphs)
        readback_test(Graph6Format(), g, "graph1")
    end
    f = gettempname()
    write_test(Graph6Format(), graphs, f; remove=false, silent=true)
    read_test_mult(Graph6Format(), graphs, f)
    rm(f)
end
| GraphIO | https://github.com/JuliaGraphs/GraphIO.jl.git |
|
[
"MIT"
] | 0.7.0 | bc5b7609e9f4583f303a0ab2a7016ea318464da0 | code | 668 | using Test
using EzXML
using GraphIO.GraphML
@testset "GraphML" begin
    for g in values(allgraphs)
        readback_test(GraphMLFormat(), g; testfail=true)
    end
    # warngraph.graphml deliberately contains unknown nodes/elements; the
    # loader must warn once per kind and suppress further warnings.
    fname = joinpath(testdir, "testdata", "warngraph.graphml")
    @test_logs (
        :warn, "Skipping unknown node 'warnnode' - further warnings will be suppressed"
    ) match_mode = :any loadgraphs(fname, GraphMLFormat())
    @test_logs (
        :warn,
        "Skipping unknown XML element 'warnelement' - further warnings will be suppressed",
    ) match_mode = :any loadgraph(fname, "graph", GraphMLFormat())
    d = loadgraphs(fname, GraphMLFormat())
    write_test(GraphMLFormat(), d)
end
| GraphIO | https://github.com/JuliaGraphs/GraphIO.jl.git |
|
[
"MIT"
] | 0.7.0 | bc5b7609e9f4583f303a0ab2a7016ea318464da0 | code | 190 | using Test
using CodecZlib
using GraphIO.LGCompressed
# Round-trip all fixtures through the compressed Graphs-native format.
@testset "LGCompressed" begin
    for g in values(allgraphs)
        readback_test(LGCompressedFormat(), g; testfail=true)
    end
end
| GraphIO | https://github.com/JuliaGraphs/GraphIO.jl.git |
|
[
"MIT"
] | 0.7.0 | bc5b7609e9f4583f303a0ab2a7016ea318464da0 | code | 249 | using Test
using GraphIO.NET
@testset "Pajek NET" begin
    for g in values(allgraphs)
        readback_test(NETFormat(), g)
    end
    # NET files hold a single graph; kinship.net must yield exactly one.
    fname = joinpath(testdir, "testdata", "kinship.net")
    @test length(loadgraphs(fname, NETFormat())) == 1
end
| GraphIO | https://github.com/JuliaGraphs/GraphIO.jl.git |
|
[
"MIT"
] | 0.7.0 | bc5b7609e9f4583f303a0ab2a7016ea318464da0 | docs | 4152 | # GraphIO
[](https://github.com/JuliaGraphs/GraphIO.jl/actions?query=workflow%3ACI+branch%3Amaster)
[](http://codecov.io/github/JuliaGraphs/GraphIO.jl?branch=master)
[](https://github.com/invenia/BlueStyle)
[](https://github.com/JuliaTesting/Aqua.jl)
GraphIO provides support for reading and writing [Graphs.jl](https://github.com/JuliaGraphs/Graphs.jl) graphs in various file formats.
Currently, the following functionality is provided:
| Format | Read | Write | Multiple Graphs | Format Name | Comment |
| ----------- | ---- | ----- | --------------- | -------------- | ------------------------------------------------------------------------------------------- |
| EdgeList | β | β | | EdgeListFormat | a simple list of sources and dests separated by whitespace and/or comma, one pair per line. |
| [GML] | β | β | β | GMLFormat | |
| [Graph6] | β | β | β | Graph6Format | |
| [GraphML] | β | β | β | GraphMLFormat | |
| [Pajek NET] | β | β | | NETFormat | |
| [GEXF] | | β | | GEXFFormat | |
| [DOT] | β | | β | DOTFormat | |
| [CDF] | β | | | CDFFormat | |
Graphs are read using either the `loadgraph` function or, for formats that support multiple graphs in a single file,
the `loadgraphs` functions. `loadgraph` returns a Graph object, while `loadgraphs` returns a dictionary of Graph objects.
For example, an edgelist file could be loaded as:
```julia
graph = loadgraph("path_to_graph/my_edgelist.txt", "graph_key", EdgeListFormat())
```
## Reading different graph types
All `*Format` types are readily accessible.
However, in order to use some of them with `loadgraph`, additional packages are required.
You may thus need to install and load the following dependencies before using parts of GraphIO.jl:
- Reading [DOT] or [GML] files: do `using ParserCombinator`
- Reading [GEXF] or [GraphML] files: do `using EzXML`
- Reading compressed files in `LGCompressedFormat`: do `using CodecZlib`
The current design avoids populating your environment with unnecessary dependencies.
> **_IMPLEMENTATION NOTE:_**
> The current design uses package extensions, introduced in Julia v1.9.
> At the moment, package extensions cannot conditionally load types, that is one of the main reasons why all `*Format` types are readily accessible.
> However, the functionality of `loadgraph` is extended for the various types only when the appropriate dependencies are available.
> We are searching for more intuitive ways to design this interface.
[CDF]: http://www2.ee.washington.edu/research/pstca/formats/cdf.txt
[GML]: https://en.wikipedia.org/wiki/Graph_Modelling_Language
[Graph6]: https://users.cecs.anu.edu.au/~bdm/data/formats.html
[GraphML]: https://en.wikipedia.org/wiki/GraphML
[Pajek NET]: https://gephi.org/users/supported-graph-formats/pajek-net-format/
[GEXF]: https://gephi.org/gexf/format/
[DOT]: https://en.wikipedia.org/wiki/DOT_(graph_description_language)
| GraphIO | https://github.com/JuliaGraphs/GraphIO.jl.git |
|
[
"MIT"
] | 0.1.0 | 9f838cff04c1e516826d9e77d8058bec64a42010 | code | 590 | using PulsarSearch
using Documenter
# Build the HTML documentation for PulsarSearch.jl.
makedocs(;
    modules=[PulsarSearch],
    authors="Matteo Bachetti <[email protected]> and contributors",
    repo="https://github.com/JuliaAstro/PulsarSearch.jl/blob/{commit}{path}#L{line}",
    sitename="PulsarSearch.jl",
    format=Documenter.HTML(;
        # Pretty URLs only on CI; locally keep plain file links for browsing.
        prettyurls=get(ENV, "CI", "false") == "true",
        canonical="https://juliaastro.github.io/PulsarSearch.jl",
        assets=String[],
    ),
    pages=[
        "Home" => "index.md",
    ],
)
# Publish to gh-pages; development docs track the `main` branch.
deploydocs(;
    repo="github.com/JuliaAstro/PulsarSearch.jl",
    devbranch = "main"
)
| PulsarSearch | https://github.com/JuliaAstro/PulsarSearch.jl.git |
|
[
"MIT"
] | 0.1.0 | 9f838cff04c1e516826d9e77d8058bec64a42010 | code | 4856 | module PulsarSearch
using StatsBase
include("stats.jl")
"""
    z_n_binned(profile, n) -> zsq

``Z^2_n`` statistic for pulse profiles from binned events.

See Bachetti+2021, arXiv:2012.11397.

# Arguments
- `profile`: the folded pulse profile (number of photons falling in each
  phase bin).
- `n`: number of harmonics, including the fundamental.

# Returns
The value of the statistic (0.0 when the profile holds no counts).

# Examples
```jldoctest
julia> using PulsarSearch
julia> z_n_binned(zeros(10), 2)
0.0
julia> z_n_binned(ones(10), 2) < 1e-30
true
julia> z_n_binned([10., 0., 0., 0., 0.], 2)
40.0
```
"""
function z_n_binned(profile::AbstractVector{}, n::Integer)::Float64
    total = sum(profile)
    iszero(total) && return 0.0  # no counts: statistic is identically zero
    nbin = length(profile)
    # Phase (radians) of each bin's left edge.
    phase = range(0, stop = nbin - 1) * (2pi / nbin)
    z = 0.0
    for k in 1:n
        # Sine/cosine sums of the k-th harmonic, weighted by bin counts.
        s = 0.0
        c = 0.0
        for j in eachindex(profile)
            sk, ck = sincos(k * phase[j])
            s += profile[j] * sk
            c += profile[j] * ck
        end
        z += s^2 + c^2
    end
    return 2z / total
end
"""
    z_n(phases, n) -> zsq

``Z^2_n`` statistic, à la Buccheri+83, A&A, 128, 245, eq. 2.

# Arguments
- `phases`: the pulse phases of the events (in rotations).
- `n`: number of harmonics, including the fundamental.

# Returns
The ``Z^2_n`` statistic (zero when fewer than two events are given).

# Examples
```jldoctest
julia> using PulsarSearch
julia> z_n([10., 0., 0., 0., 0.], 2)
20.0
julia> z_n(ones(10), 2)
40.0
julia> z_n(Array([0.5]), 2)
0.0
```
"""
function z_n(phases::AbstractVector{T}, n::Integer)::T where {T<:AbstractFloat}
    nev = length(phases)
    nev < 2 && return zero(T)  # statistic is meaningless for < 2 events
    angles = 2 * pi * phases
    z = zero(T)
    for k in 1:n
        # Unweighted sine/cosine sums of the k-th harmonic.
        s = zero(T)
        c = zero(T)
        for ang in angles
            sk, ck = sincos(k * ang)
            s += sk
            c += ck
        end
        z += s^2 + c^2
    end
    return 2z / nev
end
"""
    z_n_search(times, n, fmin, fmax; oversample=2) -> (fgrid, zsq_stat)

Calculate the ``Z^2_n`` statistic at trial frequencies in photon data.

# Arguments
- `times`: the event arrival times (only the first and last are used to
  set the frequency resolution, so they are assumed sorted).
- `n`: the number of harmonics in ``Z^2_n``.
- `fmin`: minimum pulse frequency to search.
- `fmax`: maximum pulse frequency to search.
- `oversample`: oversampling factor with respect to the usual 1/T/n rule.

# Returns
- `fgrid`: frequency grid of the epoch folding periodogram.
- `zsq_stat`: the ``Z^2_n`` statistic corresponding to each frequency bin.
"""
function z_n_search(
    times::AbstractVector{T},
    n::Integer,
    fmin::Number,
    fmax::Number;
    oversample::Number = 2,
) where {T <: AbstractFloat}
    t0 = first(times)
    t1 = last(times)
    # Frequency step fine enough to resolve independent trials over the span.
    df = 1 / (t1 - t0) / oversample
    freqs = fmin:df:fmax
    stats = Vector{T}(undef, length(freqs))
    # Reuse a single phase buffer instead of allocating two temporaries per
    # trial frequency: this loop is the hot path of the search.
    phases = similar(times)
    for i in eachindex(freqs)
        @. phases = times * freqs[i]
        @. phases -= floor(phases)  # fold into [0, 1)
        stats[i] = z_n(phases, n)
    end
    return freqs, stats
end
"""
`z_n_search_hist(times, n, fmin, fmax [,oversample]) --> freqs, zsq_stat`
Calculate the ``Z^2_n`` statistics at trial frequencies in photon data.
Pre-bins the data using a histogram.
At the moment, it is _not_ faster. It will be after I add the 2-d hist
+ shift-and-add. But it allowed to test that `z_n_binned` works with
binned data.
Parameters
----------
`times` : array-like
    the event arrival times
`n` : int
    the number of harmonics in ``Z^2_n``
Other Parameters
----------------
`fmin` : float
    minimum pulse frequency to search
`fmax` : float
    maximum pulse frequency to search
`oversample` : float
    Oversampling factor with respect to the usual 1/T/n rule
`nbin` : int
    number of phase bins of the intermediate histogram
Returns
-------
`fgrid` : array-like
    frequency grid of the epoch folding periodogram
`zsq_stat` : array-like
    the Z^2_n statistics corresponding to each frequency bin.
"""
function z_n_search_hist(
    times::AbstractVector{T},
    n::Integer,
    fmin::Number,
    fmax::Number;
    oversample::Number = 2,
    nbin::Integer = 16
) where {T <: AbstractFloat}
    t0 = first(times)
    t1 = last(times)
    # Frequency step fine enough to resolve independent trials over the span.
    df = 1 / (t1 - t0) / oversample
    freqs = fmin:df:fmax
    N = length(freqs)
    stats = Vector{T}(undef, N)
    for i in eachindex(freqs)
        phases = times * freqs[i]
        phases .-= floor.(phases)  # fold into [0, 1)
        # Bin the folded phases into `nbin` equal bins over [0, 1) and
        # evaluate the binned statistic on the resulting counts.
        hist = fit(Histogram, phases, 0.0:(1/nbin):1.0)
        stats[i] = z_n_binned(hist.weights, n)
    end
    return freqs, stats
end
export z_n
export z_n_binned
export z_n_search
export z_n_search_hist
# Write your package code here.
end
| PulsarSearch | https://github.com/JuliaAstro/PulsarSearch.jl.git |
|
[
"MIT"
] | 0.1.0 | 9f838cff04c1e516826d9e77d8058bec64a42010 | code | 11675 | using Distributions
"""
    extended_equiv_gaussian_Nsigma(logp)

Equivalent Gaussian sigma for a small log-probability.

Return `x` such that Q(x) = p, where Q is the complementary cumulative
normal distribution and `logp = log(p)`. Uses the rational approximation
of Abramowitz & Stegun, eqn 26.2.23 (accurate to ~1e-4); taking log(p)
as input greatly extends the usable range. The coefficients are the
result of a best fit and have no physical meaning.

Translated from Scott Ransom's PRESTO.
"""
function extended_equiv_gaussian_Nsigma(logp::Number)
    t = sqrt(-2.0 * logp)
    # Rational correction term from A&S 26.2.23.
    numerator = 2.515517 + t * (0.802853 + t * 0.010328)
    denominator = 1.0 + t * (1.432788 + t * (0.189269 + t * 0.001308))
    return t - numerator / denominator
end
"""Number of Gaussian sigmas corresponding to tail log-probability.
This function computes the value of the characteristic function of a
standard Gaussian distribution for the tail probability equivalent to the
provided p-value, and turns this value into units of standard deviations
away from the Gaussian mean. This allows the user to make a statement
about the signal such as "I detected this pulsation at 4.1 sigma".
The example values below are obtained by brute-force integrating the
Gaussian probability density function using the mpmath library
between Nsigma and +inf.
# Examples
```jldoctest
julia> using PulsarSearch
julia> pvalues = Array([0.15865525393145707, 0.0013498980316301035, 9.865877e-10, 6.22096e-16, 3.0567e-138]);
julia> log_pvalues = log.(pvalues);
julia> sigmas = Array([1, 3, 6, 8, 25]);
julia> all(isapprox(equivalent_gaussian_Nsigma_from_logp.(log_pvalues), sigmas; atol=0.1))
true
```
"""
function equivalent_gaussian_Nsigma_from_logp(logp)
    if logp < -300
        # cquantile(Normal(), exp(logp)) underflows for such tiny tail
        # probabilities; switch to the extended rational approximation.
        return extended_equiv_gaussian_Nsigma(logp)
    end
    d = Normal()
    return cquantile(d, exp(logp))
end
export equivalent_gaussian_Nsigma_from_logp
"""Number of Gaussian sigmas corresponding to tail probability.
This function computes the value of the characteristic function of a
standard Gaussian distribution for the tail probability equivalent to the
provided p-value, and turns this value into units of standard deviations
away from the Gaussian mean. This allows the user to make a statement
about the signal such as "I detected this pulsation at 4.1 sigma".
# Examples
```jldoctest
julia> using PulsarSearch
julia> pvalues = Array([0.15865525393145707, 0.0013498980316301035]);
julia> sigmas = Array([1, 3]);
julia> all(isapprox.(equivalent_gaussian_Nsigma.(pvalues), sigmas; atol=0.1))
true
```
"""
function equivalent_gaussian_Nsigma(p)
    # Thin wrapper: delegate to the log-space version for precision.
    return equivalent_gaussian_Nsigma_from_logp(log(p))
end
export equivalent_gaussian_Nsigma
"""
    log_asymptotic_incomplete_gamma(a, z)

Natural log of the (upper) incomplete gamma function Γ(a, z) in its
asymptotic limit as z → ∞, from Abramowitz and Stegun eqn 6.5.32.

Translated from Scott Ransom's PRESTO.

# Examples
```jldoctest
julia> using PulsarSearch
julia> isapprox(log_asymptotic_incomplete_gamma(1.0, 20.0), -20.0; atol=1e-12)
true
```
"""
function log_asymptotic_incomplete_gamma(a, z)
    # Accumulate the asymptotic series 1 + (a-1)/z + (a-1)(a-2)/z^2 + ...
    # until the next term is negligible.
    series = 1.0
    coeff = 1.0
    k = 1
    while true
        coeff *= (a - k)
        delta = coeff / z^k
        series += delta
        abs(delta) <= 1e-15 && break
        k += 1
    end
    return (a - 1.0) * log(z) - z + log(series)
end
export log_asymptotic_incomplete_gamma
"""
    log_asymptotic_gamma(z)

Natural log of the gamma function in its asymptotic limit as z → ∞,
from Abramowitz and Stegun eqn 6.1.41 (Stirling series through z^-7).

Translated from Scott Ransom's PRESTO.
"""
function log_asymptotic_gamma(z)
    half_log_twopi = 0.91893853320467267  # log(2*pi) / 2
    # Stirling correction-series coefficients in powers of y = 1/z^2.
    c1 = 8.3333333333333333333333e-2      # 1/12
    c3 = 2.7777777777777777777778e-3      # 1/360
    c5 = 7.9365079365079365079365e-4      # 1/1260
    c7 = 5.9523809523809529e-4            # 1/1680
    y = 1.0 / (z * z)
    # Horner evaluation of 1/(12z) - 1/(360z^3) + 1/(1260z^5) - 1/(1680z^7).
    correction = (((-c7 * y + c5) * y - c3) * y + c1) / z
    return (z - 0.5) * log(z) - z + half_log_twopi + correction
end
"""Log survival function of the chi-squared distribution.
# Examples
```jldoctest chi2_logp
julia> using PulsarSearch;
julia> using Distributions;
julia> chi2 = 31;
julia> d = Chisq(2);
julia> isapprox(chi2_logp(chi2, 2), logccdf(d, chi2), atol=0.1)
true
julia> chi2 = Array([5, 32]);
julia> all(isapprox.(chi2_logp.(chi2, 2), logccdf.(d, chi2), atol=0.1))
true
```
"""
function chi2_logp(chi2, dof)
    # If very large reduced chi squared, use approximation. This is an
    # eyeballed limit parameter space where the difference between the
    # approximation and the scipy version is tiny, but above which the scipy
    # version starts failing.
    if (chi2 / dof > 15.0) || ((dof > 150) && (chi2 / dof > 6.0))
        # logccdf of Chisq(dof) equals log(Q(dof/2, chi2/2)) with Q the
        # regularized upper incomplete gamma; use its asymptotic form here.
        return log_asymptotic_incomplete_gamma(0.5 * dof, 0.5 * chi2) -
               log_asymptotic_gamma(0.5 * dof)
    end
    d = Chisq(dof)
    return logccdf(d, chi2)
end
export chi2_logp
"""
    logp_multitrial_from_single_logp(logp1, n)

Calculate the log of a multi-trial p-value from the log of the
single-trial one.

Working in log space avoids precision loss for very small probabilities
(and historically worked around Numba's limitation on longdoubles while
keeping the computation vectorizable).

# Arguments
- `logp1`: natural log of the significance at which the null hypothesis
  is rejected on each single trial.
- `n`: the number of trials.

# Returns
The log of the significance after `n` trials.
"""
function logp_multitrial_from_single_logp(logp1, n)
    logn = log(n)
    # When p1 * n < ~1e-3 (log < -7) the Bonferroni approximation
    # p_n ≈ n * p1 is accurate and avoids cancellation in 1 - (1 - p1)^n.
    logp1 + logn < -7 && return logp1 + logn
    return log(1 - (1 - exp(logp1))^n)
end
export logp_multitrial_from_single_logp
raw"""
    p_multitrial_from_single_trial(p1, n)

Calculate a multi-trial p-value from a single-trial one.

Calling *p* the probability of a single success, the binomial
distribution gives the probability of *at least* one success in ``n``
trials as

``P(k \geq 1) = 1 - \binom{n}{0} (1-p)^n = 1 - (1-p)^n``

# Arguments
- `p1`: the significance at which the null hypothesis is rejected on
  each single trial.
- `n`: the number of trials.

# Returns
The significance at which the null hypothesis is rejected after `n`
trials.
"""
p_multitrial_from_single_trial(p1, n) =
    exp(logp_multitrial_from_single_logp(log(p1), n))
export p_multitrial_from_single_trial
"""
    logp_single_trial_from_logp_multitrial(logpn, n)

Calculate the log of the single-trial p-value from the log of the
multi-trial one (the inverse of `logp_multitrial_from_single_logp`).

Working in log space avoids precision loss for very small probabilities.

# Arguments
- `logpn`: natural log of the significance at which the null hypothesis
  is to be rejected after `n` trials.
- `n`: the number of trials.

# Returns
The log of the single-trial significance.
"""
function logp_single_trial_from_logp_multitrial(logpn, n)
    # Very small total probability: Bonferroni, p1 ≈ pn / n.
    logpn < -7 && return logpn - log(n)
    # Exact (Šidák) inversion; numerical error only matters when pn ≈ 1.
    return log(1 - (1 - exp(logpn))^(1 / n))
end
export logp_single_trial_from_logp_multitrial
raw"""
    p_single_trial_from_p_multitrial(pn, n)

Calculate the single-trial p-value from a total (multi-trial) p-value.

To reject the null hypothesis at level ``pn`` after ``n`` independent
measurements — e.g. a 1% chance of detecting a signal anywhere in a
power spectrum — the per-trial detection level must be tightened. The
usual Bonferroni correction ``p1 = pn / n`` is a good approximation only
when ``pn`` is small. Here the binomial problem is inverted exactly:
since (see `p_multitrial_from_single_trial`)

``P(k \geq 1) = 1 - (1 - p)^n``

the single-trial probability follows as

``p = 1 - (1 - P)^{(1/n)}``

also known as the Šidák correction.

# Arguments
- `pn`: the significance at which the null hypothesis is to be rejected
  after `n` trials.
- `n`: the number of trials.

# Returns
The significance at which the null hypothesis is rejected on each
single trial.
"""
p_single_trial_from_p_multitrial(pn, n) =
    exp(logp_single_trial_from_logp_multitrial(log(pn), n))
export p_single_trial_from_p_multitrial
"""Calculate the probability of a certain folded profile, due to noise.
Parameters
----------
z2 : float
    A ``Z^2_n`` statistics value
n : int, default 2
    The ``n`` in ``Z^2_n`` (number of harmonics, including the fundamental)
Other Parameters
----------------
ntrial : int
    The number of trials executed to find this profile
n_summed_spectra : int
    Number of ``Z_2^n`` periodograms that were averaged to obtain z2
Returns
-------
p : float
    The probability that the ``Z^2_n`` value has been produced by noise
"""
function z2_n_probability(z2, n; ntrial = 1, n_summed_spectra = 1)
    # Under the null hypothesis, n_summed_spectra * Z^2_n follows a chi^2
    # distribution with 2 * n * n_summed_spectra degrees of freedom.
    d = Chisq(2 * n * n_summed_spectra)
    epsilon_1 = ccdf(d, z2 * n_summed_spectra)
    # Correct the single-trial probability for the number of trials.
    epsilon = p_multitrial_from_single_trial(epsilon_1, ntrial)
    return epsilon
end
export z2_n_probability
"""Calculate the log-probability of a certain folded profile, due to noise.
Parameters
----------
z2 : float
    A ``Z^2_n`` statistics value
n : int, default 2
    The ``n`` in ``Z^2_n`` (number of harmonics, including the fundamental)
Other Parameters
----------------
ntrial : int
    The number of trials executed to find this profile
n_summed_spectra : int
    Number of ``Z_2^n`` periodograms that were averaged to obtain z2
Returns
-------
logp : float
    The natural log of the probability that the ``Z^2_n`` value has been
    produced by noise
"""
function z2_n_logprobability(z2, n; ntrial = 1, n_summed_spectra = 1)
    # Single-trial log-probability from the chi^2 survival function...
    epsilon_1 = chi2_logp(z2 * n_summed_spectra, 2 * n * n_summed_spectra)
    # ...then corrected for the number of trials, all in log space.
    epsilon = logp_multitrial_from_single_logp(epsilon_1, ntrial)
    return epsilon
end
export z2_n_logprobability
"""Return the detection level for the ``Z^2_n`` statistics.
See Buccheri et al. (1983), Bendat and Piersol (1971).
Parameters
----------
n : int, default 2
    The ``n`` in ``Z^2_n`` (number of harmonics, including the fundamental)
epsilon : float, default 0.01
    The fractional probability that the signal has been produced by noise
Other Parameters
----------------
ntrial : int
    The number of trials executed to find this profile
n_summed_spectra : int
    Number of Z_2^n periodograms that are being averaged
Returns
-------
detlev : float
    The epoch folding statistics corresponding to a probability
    epsilon * 100 % that the signal has been produced by noise
"""
function z2_n_detection_level(
    n = 2,
    epsilon = 0.01;
    ntrial = 1,
    n_summed_spectra = 1,
)
    # Convert the requested global significance to a per-trial one.
    epsilon = p_single_trial_from_p_multitrial(epsilon, ntrial)
    d = Chisq(2 * n_summed_spectra * n)
    # Invert the chi^2 survival function; rescale for averaged spectra.
    retlev = cquantile(d, epsilon) / (n_summed_spectra)
    return retlev
end
export z2_n_detection_level
| PulsarSearch | https://github.com/JuliaAstro/PulsarSearch.jl.git |
|
[
"MIT"
] | 0.1.0 | 9f838cff04c1e516826d9e77d8058bec64a42010 | code | 2597 | using PulsarSearch
using Test
using Distributions
using Documenter
doctest(PulsarSearch)
@testset "PulsarSearch.jl" begin
    @testset "Basic" begin
        # Fewer than two events: the statistic is defined to be zero.
        flatarr = Array([0.5])
        @test z_n(flatarr, 2) == 0
        # Uniformly spread phases: harmonic sums cancel to ~machine precision.
        flatarr = Array(range(1, stop = 10) / 10)
        @test z_n(flatarr, 2) < 1e-10
        @test z_n(ones(10), 2) == 40
    end
    @testset "Binned" begin
        flatarr = zeros(10)
        @test z_n_binned(flatarr, 2) == 0
        flatarr = ones(10)
        @test z_n_binned(flatarr, 2) < 1e-10
        arr = zeros(10)
        arr[1] = 10
        # NOTE(review): this calls z_n on a counts array; it passes, but
        # z_n_binned(arr, 2) was probably intended here — confirm.
        @test z_n(arr, 2) == 40
    end
    @testset "Search" begin
        # Simulate 10000 events pulsed at f = 1.123 Hz with a Gaussian
        # pulse profile, spread over ~1000 rotations.
        phases = rand(Normal(0.5, 0.1), 10000)
        pulse_no = rand(Uniform(0, 1000), 10000)
        pulse_no = floor.(pulse_no)
        f = 1.123
        times = sort((phases + pulse_no) / f)
        freqs, stats = z_n_search(times, 2, 1.0, 1.5, oversample = 4.0)
        freqs_bin, stats_bin = z_n_search_hist(times, 2, 1.0, 1.5, oversample = 4.0, nbin=256)
        # Both searches must peak at the injected frequency, with
        # compatible peak statistics.
        maxind = argmax(stats)
        @test abs(freqs[maxind] - f) < 1e-3
        maxind = argmax(stats_bin)
        @test abs(freqs_bin[maxind] - f) < 1e-3
        @test isapprox(stats_bin[maxind], stats[maxind], rtol=0.1)
    end
end
@testset "Stat" begin
    # Single-trial -> multi-trial -> single-trial must round-trip.
    @testset "single_from_multi $ntrial" for ntrial in
                                             [1, 10, 100, 1000, 10000, 100000]
        epsilon_1 = 0.00000001
        epsilon_n = p_multitrial_from_single_trial(epsilon_1, ntrial)
        epsilon_1_corr = p_single_trial_from_p_multitrial(epsilon_n, ntrial)
        @test isapprox(epsilon_1_corr, epsilon_1; rtol = 1e-2)
    end
    @testset "Zn Det Lev" begin
        @test isapprox(z2_n_detection_level(2), 13.276704135987625)
        # ntrial correction must agree with explicit Šidák correction.
        epsilon_corr = p_single_trial_from_p_multitrial(0.01, 2)
        @test isapprox(
            z2_n_detection_level(4, 0.01, ntrial = 2),
            z2_n_detection_level(4, epsilon_corr),
        )
    end
    # Probability and detection level must be mutual inverses; the log
    # version must match log(0.1).
    @testset "Zn Probability ntrial $ntrial" for ntrial in
                                                 [1, 10, 100, 1000, 100000]
        detlev = z2_n_detection_level(2, 0.1, ntrial = ntrial)
        @test isapprox(z2_n_probability(detlev, 2, ntrial = ntrial), 0.1)
        @test isapprox(z2_n_logprobability(detlev, 2, ntrial = ntrial), -2.3025850929940455)
    end
    @testset "Chisq logp" begin
        chi2 = 31
        d = Chisq(2)
        @test isapprox(chi2_logp(chi2, 2), logccdf(d, chi2), atol = 0.1)
        chi2 = Array([5, 32])
        @test all(isapprox.(chi2_logp.(chi2, 2), logccdf.(d, chi2), atol = 0.1))
    end
end
| PulsarSearch | https://github.com/JuliaAstro/PulsarSearch.jl.git |
|
[
"MIT"
] | 0.1.0 | 9f838cff04c1e516826d9e77d8058bec64a42010 | docs | 739 | # PulsarSearch
[](https://juliaastro.github.io/PulsarSearch.jl/stable)
[](https://juliaastro.github.io/PulsarSearch.jl/dev)
[](https://github.com/juliaastro/PulsarSearch.jl/actions)
[](https://codecov.io/gh/juliaastro/PulsarSearch.jl)
This is a draft Julia package for pulsar searches. At the moment it only contains the basic functions for the $Z^2_n$ search, in the single-photon (Buccheri et al. 1983) and binned (Bachetti et al. 2021) versions.
| PulsarSearch | https://github.com/JuliaAstro/PulsarSearch.jl.git |
|
[
"MIT"
] | 0.1.0 | 9f838cff04c1e516826d9e77d8058bec64a42010 | docs | 231 | ```@meta
CurrentModule = PulsarSearch
```
# PulsarSearch
`PulsarSearch` is a set of tools for (X-ray) pulsar searches.
VERY EARLY DRAFT. Mostly for educational purposes
```@index
```
```@autodocs
Modules = [PulsarSearch]
```
| PulsarSearch | https://github.com/JuliaAstro/PulsarSearch.jl.git |
|
[
"MIT"
] | 0.1.0 | 1c56c6aa813337249e142d227a661747b0d287cd | code | 240 | using Documenter, AvailablePotentialEnergyFramework
# Build and deploy the documentation for AvailablePotentialEnergyFramework.jl.
# Fixed: this script previously referenced the unrelated SAMtools package (a
# leftover from the template it was copied from), so `makedocs` documented the
# wrong module and `deploydocs` targeted the wrong repository.
makedocs(
    modules = [AvailablePotentialEnergyFramework],
    format = :html,
    checkdocs = :exports,
    sitename = "AvailablePotentialEnergyFramework.jl",
    pages = Any["index.md"]
)
deploydocs(
    repo = "github.com/aramirezreyes/AvailablePotentialEnergyFramework.jl.git",
)
| AvailablePotentialEnergyFramework | https://github.com/aramirezreyes/AvailablePotentialEnergyFramework.jl.git |
|
[
"MIT"
] | 0.1.0 | 1c56c6aa813337249e142d227a661747b0d287cd | code | 3049 | VERSION < v"0.1"
"""
    AvailablePotentialEnergyFramework

Tools to compute available-potential-energy (APE) and buoyancy budgets from
cloud-resolving model output and to build storm-centered composites.

Data filters: `filter_array`, `filter_array_2`, `filter_array_time`, `getsmoothdata`, `getsmoothdata_nospace`
Data structures: `ape_budget`, `cat_ape_budget`, `cutborders!`, `surf_quantities`, `cyclone_comp_timemean`, `Composite_Cyclone`, `Composite_Cyclone_v2`, `Composite_Cyclone_v3`
Methods: `cyclonecompositer`, `shifter`, `smoothfilter`, `cyclonecompositer_v2`, `cyclonecompositer_v3`, `timemean_nofalseframe`, `removefalseframes`, `getapebudget`, `buoyancybudget`
"""
module AvailablePotentialEnergyFramework
# Third-party dependencies: only the needed names are brought into scope.
using DataStructures: OrderedDict
using ImageFiltering: imfilter, imfilter!, centered, kernelfactors, mapwindow, mapwindow!, Kernel, Inner
using Images: findlocalminima, findlocalmaxima
using ImageSegmentation: SegmentedImage, segment_labels, region_adjacency_graph, seeded_region_growing, labels_map
using Interpolations: LinearInterpolation, interpolate, Gridded, Linear
using NCDatasets: Dataset, variable, defVar
using OffsetArrays: OffsetArray
using SparseArrays: SparseMatrixCSC
using Statistics: mean, median!, mean!
using Unitful: @u_str, unit, ustrip, Quantity
using LoopVectorization: @turbo, @tturbo
using Tullio: @tullio
# Package sources, grouped by topic.
include("apehelperfunctions.jl")
include("compositehelperfunctions.jl")
include("apebudgets.jl")
include("physicalconstants.jl")
include("physicsfunctions.jl")
include("useful_diagnostics.jl")
#include("ape_computation_from_julia_output.jl") #testing purposes
include("arrayfiltering.jl")
include("datamanagement.jl")
export
    # Filters
    filter_array!,
    filter_array_2!,
    filter_array_time,
    filter_array,
    getsmoothdata!,
    getsmoothdata_nospace,
    #Data structures
    cat_ape_budget,
    cutborders!,
    #Methods
    # findlocalmaxima,
    cyclonecompositer,
    shifter,
    shifter!,
    smooth_and_mask,
    smooth_and_mask!,
    getapebudget,
    buoyancybudget,
    add_allcyclones!,
    averageallindistance,
    detect_cyclones,
    detect_cyclones!,
    get_diabatic_as_residual_buoyancy,
    run_distributed_test,
    ##Constants
    R,
    Dryair,
    Watervapor,
    Liquidwater,
    epsilon,
    g,
    #Physics functions
    compute_N2,
    compute_mse,
    get_mse_budget,
    get_vorticity,
    get_okubo_weiss,
    get_divergence,
    get_saturation_vapor_pressure,
    get_partial_vapor_pressure,
    get_mixing_ratio,
    get_virtual_temperature,
    get_lifted_condensation_level,
    get_specific_entropy,
    get_potential_temperature,
    # NOTE(review): `get_virtual_temperature` below duplicates the export a
    # few lines above (harmless, but one of the two should be removed).
    get_virtual_temperature,
    mixing_ratio_to_specific_humidity,
    specific_humidity_to_mixing_ratio,
    get_buoyancy_of_lifted_parcel,
    surface_sensible_heat_flux_to_buoyancy,
    surface_latent_heat_flux_to_buoyancy,
    get_buoyancy,
    radiative_heating_rate_to_buoyancy,
    get_density_ideal_gas,
    #math functions
    integrate_vertically,
    average_precipitation_per_pw_bin,
    average_precipitation_per_pw_bin_dayang,
    #Files and datamanagement
    smooth_vars_and_write_to_netcdf!,
    create_APE_netcdf,
    set_netcdf_var!
end
|
[
"MIT"
] | 0.1.0 | 1c56c6aa813337249e142d227a661747b0d287cd | code | 20203 | ### Two methods to compute APE budgets and Buoyancy budgets from SAM v3 outputs, modified from Da Yang, and based on the mathematics from
# Yang, Da. βBoundary Layer Diabatic Processes, the Virtual Effect, and Convective Self-Aggregation.β Journal of Advances in Modeling Earth Systems 10, no. 9 (September 2018): 2163β76. https://doi.org/10.1029/2017MS001261.
"""
Returns an apebudget object.
Input is
B (buoyancy)
U,V,W The three dimensional velocities U,V and W
N2 The Brunt Va"isala frequency squared
Fs the Surface fluxes
Diabatic_other other sources of diabatic heating
rho0 the mean density
x,y,z,t the coordinate vectors
dx,dy,dz,dt the steps in each coordinate
z_up the maximum height that will be used
"""
function getapebudget_old_old(B, U,V, W, N2, RAD_b, Fs, Diabatic_other, rho0, x,y, z, t, dx,dy, dz, dt, z_up)
#***********Empty array generation***********#
Udb2dx = similar(U)
Vdb2dy = similar(V)
#************* APE **************
b2 = B.*B/2
APE_b2 = mean(b2,dims=(1,2))[1,1,:,:]./N2;
# # # figure(4)
# # # contourf(APE_b2)
#************ KE ********************
#KE = U.*U/2 + V.*V/2
xBar_KE = mean(U.*U/2 + V.*V/2,dims=(1,2))[1,1,:,:]
#************ APE rate ***************
xBar_APE_rate = Array{eltype(B),2}(undef,length(z), length(t))
xBar_APE_rate[:,1:end-1] = (APE_b2[:,2:end] - APE_b2[:,1:end-1])/dt;
xBar_APE_rate[:,end] = xBar_APE_rate[:,end-1]
#************* UdxB2 **************
b2_ghost= Array{typeof(B)}(undef, length(x)+1,length(y)+1, length(z), length(t))
b2_ghost[1:end-1,1:end-1,:,:] = b2
b2_ghost[end,1:end-1,:,:] = b2[1,:,:,:]
b2_ghost[1:end-1,end,:,:] = b2[:,1,:,:]
@. Udb2dx = U*(b2_ghost[2:end,1:end-1,:,:]-b2_ghost[1:end-1,1:end-1,:,:])/dx
@. Vdb2dy = V*(b2_ghost[1:end-1,2:end,:,:]-b2_ghost[1:end-1,1:end-1,:,:])/dy
xBar_APE_Ub2 = mean(Udb2dx,dims=(1,2))[1,1,:,:]./N2
xBar_APE_Vb2 = mean(Vdb2dy,dims=(1,2))[1,1,:,:]./N2
# static stability WN2
#APE_WN2 = W.*B
xBar_APE_WN2= mean(W.*B,dims=(1,2))[1,1,:,:]
# RAD generation
xBar_APE_RAD = mean(RAD_b.*B,dims=(1,2))[1,1,:,:]./N2;
# Diabatic_other
xBar_APE_DIA = mean(Diabatic_other.*B,dims=(1,2))[1,1,:,:]./N2;
# interpolation
k_up = argmin(abs.(z.-z_up));
z1 = z[1]:dz:z[k_up];
@info z1
rho01 = zeros(length(z1),length(t))
xBar_APE_b21 = similar(rho01)
xBar_APE_RAD1 = similar(rho01)
xBar_APE_DIA1 = similar(rho01)
xBar_APE_WN21 = similar(rho01)
xBar_APE_Ub21 = similar(rho01)
xBar_APE_Vb21 = similar(rho01)
xBar_KE1 = similar(rho01)
xBar_APE_rate1 = similar(rho01)
for time in 1:length(t)
rho01_itp = LinearInterpolation(z, rho0[:,time]);
xBar_APE_b21_itp = LinearInterpolation(z, APE_b2[:,time]);
xBar_APE_RAD1_itp = LinearInterpolation(z, xBar_APE_RAD[:,time]);
xBar_APE_DIA1_itp = LinearInterpolation(z, xBar_APE_DIA[:,time]);
xBar_APE_WN21_itp = LinearInterpolation(z, xBar_APE_WN2[:,time]);
xBar_APE_Ub21_itp = LinearInterpolation(z, xBar_APE_Ub2[:,time]);
xBar_APE_Vb21_itp = LinearInterpolation(z, xBar_APE_Vb2[:,time]);
xBar_KE1_itp = LinearInterpolation(z, xBar_KE[:,time]);
xBar_APE_rate1_itp = LinearInterpolation(z, xBar_APE_rate[:,time]);
rho01[:,time] = [rho01_itp(x) for x in z1]
xBar_APE_b21[:,time] = [xBar_APE_b21_itp(x) for x in z1]
xBar_APE_RAD1[:,time] = [xBar_APE_RAD1_itp(x) for x in z1]
xBar_APE_DIA1[:,time] = [xBar_APE_DIA1_itp(x) for x in z1]
xBar_APE_WN21[:,time] = [xBar_APE_WN21_itp(x) for x in z1]
xBar_APE_Ub21[:,time] = [xBar_APE_Ub21_itp(x) for x in z1]
xBar_APE_Vb21[:,time] = [xBar_APE_Vb21_itp(x) for x in z1]
xBar_KE1[:,time] = [xBar_KE1_itp(x) for x in z1]
xBar_APE_rate1[:,time] = [xBar_APE_rate1_itp(x) for x in z1]
end
# vertical integration
int_mass = sum(rho01.*dz,dims=1)[1,:]
int_KE = sum(rho01.*xBar_KE1.*dz,dims=1)[1,:]
int_APE = sum(rho01.*xBar_APE_b21.*dz,dims=1)[1,:]
int_APE_RAD = sum(rho01.*xBar_APE_RAD1.*dz,dims=1)[1,:]
int_APE_DIA = sum(rho01.*xBar_APE_DIA1.*dz,dims=1)[1,:]
int_APE_WN2 = sum(rho01.*xBar_APE_WN21.*dz,dims=1)[1,:]
int_APE_Ub2 = sum(rho01.*xBar_APE_Ub21.*dz,dims=1)[1,:]
int_APE_Vb2 = sum(rho01.*xBar_APE_Vb21.*dz,dims=1)[1,:]
int_APE_rate = sum(rho01.*xBar_APE_rate1.*dz,dims=1)[1,:]
# surface flux contribution
#N2S = N2[1,:]
#APE_Fs = B[:,:,1,:].*Fs
xBar_APE_Fs = mean(B[:,:,1,:].*Fs, dims=(1,2))[1,1,1,:]./N2[1,:];
residual = int_APE_rate .+ int_APE_Ub2 .+ int_APE_Vb2+int_APE_WN2 .- (int_APE_RAD .+ int_APE_DIA .+ xBar_APE_Fs)
return (int_mass, int_KE, int_APE, int_APE_rate, int_APE_Ub2,int_APE_Vb2, int_APE_WN2, int_APE_RAD, int_APE_DIA, xBar_APE_Fs, residual)
end
"""
    getapebudget_old(B, U, V, W, N2, RAD_b, Fs, Diabatic_other, rho0,
                     x, y, z, t, dx, dy, dz, dt, z_up)

Compute the domain-mean APE budget (buffered legacy implementation; prefer
`getapebudget`). See `getapebudget` for the meaning of the arguments and of
the returned tuple.
"""
function getapebudget_old(B, U,V, W, N2, RAD_b, Fs, Diabatic_other, rho0, x,y, z, t, dx,dy, dz, dt, z_up)
    # Reshape so the (z, t) profile broadcasts against (x, y, z, t) fields.
    N2 = reshape(N2,1,1,length(z),length(t))
    #***********Empty array generation***********#
    T = eltype(B)
    lt = length(t)
    lz = length(z)
    # Fixed: lx and ly were swapped (lx was length(y) and vice versa), which
    # mis-sized every buffer on non-square domains.
    lx = length(x)
    ly = length(y)
    buf = Array{T}(undef,lx,ly,lz, lt)
    xBar_KE = Array{T}(undef,1,1,lz, lt)
    APE_b2 = Array{T}(undef,1,1,lz, lt)
    xBar_APE_rate = Array{T}(undef,lz, lt)
    b2_ghost = Array{T}(undef, length(x)+1,length(y)+1, lz, lt)
    xBar_APE_Ub2 = Array{T}(undef,1,1,lz, lt)
    xBar_APE_Vb2 = Array{T}(undef,1,1,lz, lt)
    xBar_APE_WN2 = Array{T}(undef,1,1,lz, lt)
    xBar_APE_RAD = Array{T}(undef,1,1,lz, lt)
    xBar_APE_DIA = Array{T}(undef,1,1,lz, lt)
    xBar_APE_FS = Array{T}(undef,1,1,lz, lt)
    #************* APE (b^2 / 2N^2) **************
    @. buf = B*B/2
    mean!(APE_b2,buf);
    @. APE_b2 = APE_b2/N2
    # Ghost cells implement the doubly periodic horizontal boundary.
    @. b2_ghost[1:end-1,1:end-1,:,:] = buf
    @. b2_ghost[end,1:end-1,:,:] = buf[1,:,:,:]
    @. b2_ghost[1:end-1,end,:,:] = buf[:,1,:,:]
    #************ KE ********************
    @. buf = U*U/2 + V*V/2
    xBar_KE = mean!(xBar_KE,buf)
    #************ APE rate (forward difference, last frame replicated) ***************
    @. xBar_APE_rate[:,1:end-1] = (APE_b2[1,1,:,2:end] - APE_b2[1,1,:,1:end-1])/dt;
    @. xBar_APE_rate[:,end] = xBar_APE_rate[:,end-1]
    #************* Advection U d(b^2/2)/dx, V d(b^2/2)/dy **************
    @. buf = U*(b2_ghost[2:end,1:end-1,:,:]-b2_ghost[1:end-1,1:end-1,:,:])/dx
    mean!(xBar_APE_Ub2,buf)
    @. buf = V*(b2_ghost[1:end-1,2:end,:,:]-b2_ghost[1:end-1,1:end-1,:,:])/dy
    mean!(xBar_APE_Vb2,buf)
    @. xBar_APE_Ub2 = xBar_APE_Ub2/N2
    @. xBar_APE_Vb2 = xBar_APE_Vb2/N2
    # Static stability conversion term w * b.
    @. buf = W*B
    mean!(xBar_APE_WN2,buf)
    # Radiative generation.
    @. buf = RAD_b.*B
    mean!(xBar_APE_RAD,buf);
    @. xBar_APE_RAD = xBar_APE_RAD/N2
    # Other diabatic generation.
    @. buf = Diabatic_other.*B
    mean!(xBar_APE_DIA,buf)
    @. xBar_APE_DIA = xBar_APE_DIA/N2;
    # Surface flux contribution at the lowest model level.
    xBar_APE_Fs = mean(B[:,:,1,:].*Fs, dims=(1,2))[1,1,:]./N2[1,1,1,:];
    # Interpolate every term onto the uniform grid z1 and integrate with
    # rho0 * dz weights up to z_up.
    k_up = argmin(abs.(z.-z_up));
    z1 = z[1]:dz:z[k_up];
    int_mass = zeros(T,lt)
    int_KE = zeros(T,lt)
    int_APE = zeros(T,lt)
    int_APE_RAD = zeros(T,lt)
    int_APE_DIA = zeros(T,lt)
    int_APE_WN2 = zeros(T,lt)
    int_APE_Ub2 = zeros(T,lt)
    int_APE_Vb2 = zeros(T,lt)
    int_APE_rate = zeros(T,lt)
    for timeind in 1:lt
        rho01_itp = interpolate((z,), rho0[:,timeind],Gridded(Linear()))
        xBar_APE_b21_itp = interpolate((z,), APE_b2[1,1,:,timeind],Gridded(Linear()))
        xBar_APE_RAD1_itp = interpolate((z,), xBar_APE_RAD[1,1,:,timeind],Gridded(Linear()))
        xBar_APE_DIA1_itp = interpolate((z,), xBar_APE_DIA[1,1,:,timeind],Gridded(Linear()))
        xBar_APE_WN21_itp = interpolate((z,), xBar_APE_WN2[1,1,:,timeind],Gridded(Linear()))
        xBar_APE_Ub21_itp = interpolate((z,), xBar_APE_Ub2[1,1,:,timeind],Gridded(Linear()))
        xBar_APE_Vb21_itp = interpolate((z,), xBar_APE_Vb2[1,1,:,timeind],Gridded(Linear()))
        xBar_KE1_itp = interpolate((z,), xBar_KE[1,1,:,timeind],Gridded(Linear()))
        xBar_APE_rate1_itp = interpolate((z,), xBar_APE_rate[:,timeind],Gridded(Linear()))
        for zeta in z1
            mass = dz*rho01_itp(zeta)
            int_mass[timeind] += mass
            int_APE[timeind] += mass*xBar_APE_b21_itp(zeta)
            int_APE_RAD[timeind] += mass*xBar_APE_RAD1_itp(zeta)
            int_APE_DIA[timeind] += mass*xBar_APE_DIA1_itp(zeta)
            int_APE_WN2[timeind] += mass*xBar_APE_WN21_itp(zeta)
            int_APE_Ub2[timeind] += mass*xBar_APE_Ub21_itp(zeta)
            int_APE_Vb2[timeind] += mass*xBar_APE_Vb21_itp(zeta)
            int_KE[timeind] += mass*xBar_KE1_itp(zeta)
            int_APE_rate[timeind] += mass*xBar_APE_rate1_itp(zeta)
        end
    end
    residual = int_APE_rate .+ int_APE_Ub2 .+ int_APE_Vb2 .+ int_APE_WN2 .- (int_APE_RAD .+ int_APE_DIA .+ xBar_APE_Fs)
    return (int_mass, int_KE, int_APE, int_APE_rate, int_APE_Ub2,int_APE_Vb2, int_APE_WN2, int_APE_RAD, int_APE_DIA, xBar_APE_Fs, residual)
end
"""
    getapebudget(B, U, V, W, N2, RAD_b, Fs, Diabatic_other, rho0,
                 x, y, z, t, dx, dy, dz, dt, z_up)

Compute the horizontally averaged, vertically integrated APE budget
following Yang (2018, JAMES, doi:10.1029/2017MS001261).

Inputs:
  * `B` buoyancy and `U`, `V`, `W` velocities, all (x, y, z, t)
  * `N2` the squared Brunt-Vaisala frequency, (z, t)
  * `RAD_b` radiative heating and `Diabatic_other` other diabatic sources,
    both in buoyancy units
  * `Fs` surface buoyancy fluxes, (x, y, t)
  * `rho0` reference density profile, (z, t)
  * `x, y, z, t` coordinate vectors and `dx, dy, dz, dt` their spacings
  * `z_up` top of the vertical integration

Returns `(int_mass, int_KE, int_APE, int_APE_rate, int_APE_Ub2, int_APE_Vb2,
int_APE_WN2, int_APE_RAD, int_APE_DIA, xBar_APE_Fs, residual)` as time
series; `residual` measures the budget closure.
"""
function getapebudget(B, U,V, W, N2, RAD_b, Fs, Diabatic_other, rho0, x,y, z, t, dx,dy, dz, dt, z_up)
    # Reshape the (z, t) profiles so they broadcast against (x, y, z, t).
    N2 = reshape(N2,1,1,length(z),length(t))
    rho0 = reshape(rho0,1,1,length(z),length(t))
    #***********Empty array generation***********#
    T = eltype(B)
    lx = length(x)
    ly = length(y)
    lt = length(t)
    lz = length(z)
    buf = similar(U)
    buf_2d = similar(Fs)
    xBar_KE = Array{T}(undef,1,1,lz, lt)
    APE_b2 = Array{T}(undef,1,1,lz, lt)
    xBar_APE_rate = Array{T}(undef,1,1,lz, lt)
    xBar_APE_Ub2 = Array{T}(undef,1,1,lz, lt)
    xBar_APE_Vb2 = Array{T}(undef,1,1,lz, lt)
    xBar_APE_WN2 = Array{T}(undef,1,1,lz, lt)
    xBar_APE_RAD = Array{T}(undef,1,1,lz, lt)
    xBar_APE_DIA = Array{T}(undef,1,1,lz, lt)
    xBar_APE_Fs = Array{T}(undef,1,1,lt)
    # Periodic neighbour index along a dimension of length lx (the same
    # helper is reused for the y direction below).
    neighborx(indx,lx) = mod1(indx+1,lx)
    onex = CartesianIndex((1,0,0,0))
    oney = CartesianIndex((0,1,0,0))
    onet = CartesianIndex((0,0,0,1))
    # for ind in CartesianIndices((1:(sx - 1), 2:(sy - 1), 1:sz, 2:(st-1)))
    #************* APE (b^2 / 2N^2, horizontally averaged) **************
    @. buf = B*B/2
    mean!(APE_b2,buf./N2)
    #************ KE ********************
    @. buf = U*U/2 + V*V/2
    xBar_KE = mean!(xBar_KE,buf)
    #************ APE rate: b * (dB/dt) / N^2 (forward difference) ***************
    @inbounds for it in 1:(lt -1), iz in 1:lz, iy in 1:ly, ix in 1:lx
        ind = CartesianIndex((ix,iy,iz,it))
        buf[ind] = B[ind]*(B[ind + onet] - B[ind]) / N2[1,1,iz,it] / dt
    end
    # The last frame has no forward difference: replicate the previous one.
    @inbounds for ind in CartesianIndices((1:lx,1:ly,1:lz,lt:lt))
        buf[ind] = buf[ind-onet]
    end
    mean!(xBar_APE_rate,buf);
    #************* Advection b*u*(dB/dx)/N^2 and b*v*(dB/dy)/N^2 (periodic) **************
    @inbounds for it in 1:(lt - 1), iz in 1:lz, iy in 1:ly, ix in 1:lx
        ind = CartesianIndex((ix,iy,iz,it))
        buf[ind] = B[ind]*U[ind]*(B[neighborx(ix,lx),iy,iz,it] - B[ind]) / N2[1,1,iz,it] /dx
    end
    @inbounds for ind in CartesianIndices((1:lx,1:ly,1:lz,lt:lt))
        buf[ind] = buf[ind-onet]
    end
    mean!(xBar_APE_Ub2,buf)
    @inbounds for it in 1:lt, iz in 1:lz, iy in 1:ly, ix in 1:lx
        ind = CartesianIndex((ix,iy,iz,it))
        buf[ind] = B[ind]*V[ind]*(B[ix,neighborx(iy,ly),iz,it] - B[ind]) / N2[1,1,iz,it] /dy
    end
    @inbounds for ind in CartesianIndices((1:lx,1:ly,1:lz,lt:lt))
        buf[ind] = buf[ind-onet]
    end
    mean!(xBar_APE_Vb2,buf)
    ################################# static stability WN2 (w * b)
    @. buf = W*B
    # NOTE(review): this last-frame copy mirrors the pattern used above even
    # though buf was already computed for every time step here.
    @inbounds for ind in CartesianIndices((1:lx,1:ly,1:lz,lt:lt))
        buf[ind] = buf[ind-onet]
    end
    mean!(xBar_APE_WN2,buf)
    # Radiative generation.
    @. buf = RAD_b*B/N2
    @inbounds for ind in CartesianIndices((1:lx,1:ly,1:lz,lt:lt))
        buf[ind] = buf[ind-onet]
    end
    mean!(xBar_APE_RAD,buf);
    ########################## Other diabatic generation
    @. buf = Diabatic_other*B/N2
    @inbounds for ind in CartesianIndices((1:lx,1:ly,1:lz,lt:lt))
        buf[ind] = buf[ind-onet]
    end
    mean!(xBar_APE_DIA,buf)
    ##################### Surface flux contribution (lowest model level)
    @inbounds for tind in 1:lt
        for yind in 1:ly
            for xind in 1:lx
                buf_2d[xind,yind,tind] = B[xind,yind,1,tind]*Fs[xind,yind,tind]/N2[1,1,1,tind]
            end
        end
    end
    @inbounds for ind in CartesianIndices((1:lx,1:ly,lt:lt))
        buf_2d[ind] = buf_2d[ind - CartesianIndex((0,0,1))]
    end
    mean!(xBar_APE_Fs,buf_2d)
    xBar_APE_Fs = dropdims(xBar_APE_Fs, dims=(1,2))
    ##################### Interpolate onto the uniform grid z1 and integrate
    ##################### with rho0 * dz weights up to z_up.
    k_up = argmin(abs.(z.-z_up));
    z1 = z[1]:dz:z[k_up];
    int_mass = zeros(T,lt)
    int_KE = zeros(T,lt)
    int_APE = zeros(T,lt)
    int_APE_RAD = zeros(T,lt)
    int_APE_DIA = zeros(T,lt)
    int_APE_WN2 = zeros(T,lt)
    int_APE_Ub2 = zeros(T,lt)
    int_APE_Vb2 = zeros(T,lt)
    int_APE_rate = zeros(T,lt)
    @inbounds for time in 1:lt
        rho01_itp = interpolate((z,), rho0[1,1,:,time],Gridded(Linear()))
        xBar_APE_b21_itp = interpolate((z,), APE_b2[1,1,:,time],Gridded(Linear()))
        xBar_APE_RAD1_itp = interpolate((z,), xBar_APE_RAD[1,1,:,time],Gridded(Linear()))
        xBar_APE_DIA1_itp = interpolate((z,), xBar_APE_DIA[1,1,:,time],Gridded(Linear()))
        xBar_APE_WN21_itp = interpolate((z,), xBar_APE_WN2[1,1,:,time],Gridded(Linear()))
        xBar_APE_Ub21_itp = interpolate((z,), xBar_APE_Ub2[1,1,:,time],Gridded(Linear()))
        xBar_APE_Vb21_itp = interpolate((z,), xBar_APE_Vb2[1,1,:,time],Gridded(Linear()))
        xBar_KE1_itp = interpolate((z,), xBar_KE[1,1,:,time],Gridded(Linear()))
        xBar_APE_rate1_itp = interpolate((z,), xBar_APE_rate[1,1,:,time],Gridded(Linear()))
        @inbounds for zeta in z1
            mass = dz*rho01_itp(zeta)
            int_mass[time] += mass
            int_APE[time] += mass*xBar_APE_b21_itp(zeta)
            int_APE_RAD[time] += mass*xBar_APE_RAD1_itp(zeta)
            int_APE_DIA[time] += mass*xBar_APE_DIA1_itp(zeta)
            int_APE_WN2[time] += mass*xBar_APE_WN21_itp(zeta)
            int_APE_Ub2[time] += mass*xBar_APE_Ub21_itp(zeta)
            int_APE_Vb2[time] += mass*xBar_APE_Vb21_itp(zeta)
            int_KE[time] += mass*xBar_KE1_itp(zeta)
            int_APE_rate[time] += mass*xBar_APE_rate1_itp(zeta)
        end
    end
    residual = int_APE_rate .+ int_APE_Ub2 .+ int_APE_Vb2 .+ int_APE_WN2 .- (int_APE_RAD .+ int_APE_DIA .+ xBar_APE_Fs)
    return (int_mass, int_KE, int_APE, int_APE_rate, int_APE_Ub2,int_APE_Vb2, int_APE_WN2, int_APE_RAD, int_APE_DIA, xBar_APE_Fs, residual)
end
"""
    buoyancybudget_old(B, RAD_b, Fs, U, V, W, N2, dx, dy, dz, dt, x, y, z, t)

Compute the buoyancy budget (legacy implementation; prefer `buoyancybudget`).

Inputs:
  * `B` buoyancy and `RAD_b` radiative heating in buoyancy units, (x, y, z, t)
  * `Fs` surface buoyancy fluxes, (x, y, t)
  * `U`, `V`, `W` the three-dimensional velocities
  * `N2` the squared Brunt-Vaisala frequency, (z, t)
  * `dx, dy, dz, dt` grid spacings and `x, y, z, t` coordinate vectors

Returns `(dBdt, UdBdx, VdBdy, WN2, Qs, Diabatic_other)`, where
`Diabatic_other` is the diabatic source diagnosed as the budget residual.
"""
function buoyancybudget_old(B, RAD_b, Fs, U,V, W, N2, dx,dy, dz, dt, x,y, z, t)
    #************ Array creation **************#
    Qs = zeros(typeof(B[1]),length(x),length(y),length(z),length(t))
    B_ghost = Array{typeof(B[1])}(undef, length(x)+1,length(y)+1, length(z), length(t))
    WN2 = similar(W)
    dBdt = similar(B)
    UdBdx = similar(U)
    VdBdy = similar(U)
    #************ Static stability term w * N^2 *****************
    WN2 .= reshape(N2,(1,1,size(N2,1),size(N2,2))).*W
    #************* Advection (periodic in x and y via ghost cells) **************
    @. B_ghost[1:end-1,1:end-1,:,:] = B
    @. B_ghost[end,1:end-1,:,:] = B[1,:,:,:]
    @. B_ghost[1:end-1,end,:,:] = B[:,1,:,:]
    @. UdBdx = U*(B_ghost[2:end,1:end-1,:,:]-B_ghost[1:end-1,1:end-1,:,:])/dx
    @. VdBdy = V*(B_ghost[1:end-1,2:end,:,:]-B_ghost[1:end-1,1:end-1,:,:])/dy
    # Surface fluxes enter as a source confined to the lowest model layer.
    Qs[:,:,1,:] .= Fs./dz
    #************ Time evolution (forward difference) *************#
    @. dBdt[:,:,:,1:end-1] = (B[:,:,:,2:end] - B[:,:,:,1:end-1])/dt;
    # Fixed: the last frame was divided by dt a second time; it is simply a
    # copy of the previous frame, as in `buoyancybudget`.
    @. dBdt[:,:,:,end] = dBdt[:,:,:,end-1]
    #*************** Diabatic source as the budget residual ********************#
    Diabatic_other = dBdt .+ UdBdx .+ VdBdy .+ WN2 .- RAD_b .- Qs
    return dBdt, UdBdx,VdBdy, WN2, Qs, Diabatic_other
end
"""
    buoyancybudget(B, RAD_b, Fs, U, V, W, N2, dx, dy, dz, dt, x, y, z, t)

Compute the buoyancy budget and return the diabatic source diagnosed as the
budget residual.

Inputs:
  * `B` buoyancy and `RAD_b` radiative heating in buoyancy units, (x, y, z, t)
  * `Fs` surface buoyancy fluxes, (x, y, t)
  * `U`, `V`, `W` the three-dimensional velocities
  * `N2` the squared Brunt-Vaisala frequency, (z, t)
  * `dx, dy, dz, dt` grid spacings and `x, y, z, t` coordinate vectors

Returns `Diabatic_other = dB/dt + U dB/dx + V dB/dy + W N^2 - RAD_b`, with
the surface flux `Fs/dz` subtracted in the lowest model layer.
"""
function buoyancybudget(B, RAD_b, Fs, U,V, W, N2, dx,dy, dz, dt, x,y, z, t)
    #************ Array creation **************#
    #Qs = zeros(eltype(B),length(x),length(y),length(z),length(t))
    #B_ghost = Array{typeof(B[1])}(undef, length(x)+1,length(y)+1, length(z), length(t))
    WN2 = similar(W)
    dBdt = similar(B)
    UdBdx = similar(U)
    VdBdy = similar(U)
    #************ Static stability term w * N^2 *****************
    WN2 .= reshape(N2,(1,1,size(N2,1),size(N2,2))).*W
    #************* Advection (forward difference, wrapping the last column
    #************* back to the first for the doubly periodic domain) *******
    @. @views UdBdx[1:end-1,:,:,:] = U[1:end-1,:,:,:]*(B[2:end,:,:,:]-B[1:end-1,:,:,:])/dx
    @. @views UdBdx[end,:,:,:] = U[end,:,:,:]*(B[1,:,:,:]-B[end,:,:,:])/dx
    @. @views VdBdy[:,1:end-1,:,:] = V[:,1:end-1,:,:]*(B[:,2:end,:,:]-B[:,1:end-1,:,:])/dy
    @. @views VdBdy[:,end,:,:] = V[:,end,:,:]*(B[:,1,:,:]-B[:,end,:,:])/dy
    #@. Qs[:,:,1,:] = Fs/dz
    #************ Time evolution (forward difference; last frame replicated) *************#
    @. @views dBdt[:,:,:,1:end-1] = (B[:,:,:,2:end] - B[:,:,:,1:end-1])/dt
    @. @views dBdt[:,:,:,end] = dBdt[:,:,:,end-1]
    #*************** Residual diabatic term ********************#
    Diabatic_other = dBdt .+ UdBdx .+ VdBdy .+ WN2 .- RAD_b
    # Surface fluxes act only on the lowest model layer.
    Diabatic_other[:,:,1,:] .= @views Diabatic_other[:,:,1,:] - Fs/dz
    # return dBdt, UdBdx,VdBdy, WN2, Fs/dz, Diabatic_other
    return Diabatic_other
end
"""
    get_diabatic_as_residual_buoyancy(B, RAD_b, Fs, U, V, W, N2, dx, dy, dz, dt)

Diagnose the diabatic buoyancy source as the residual of the buoyancy
budget, `dB/dt + U dB/dx + V dB/dy + W N^2 - RAD_b`, with the surface flux
`Fs/dz` subtracted in the lowest model layer. Loop-based, allocation-light
equivalent of `buoyancybudget` (horizontal derivatives are periodic).
"""
function get_diabatic_as_residual_buoyancy(B, RAD_b, Fs, U,V, W, N2, dx,dy, dz, dt)
    #************ Array creation **************#
    #Qs = zeros(eltype(B),length(x),length(y),length(z),length(t))
    #B_ghost = Array{typeof(B[1])}(undef, length(x)+1,length(y)+1, length(z), length(t))
    Diabatic_other = similar(B)
    # NOTE(review): onex and oney are defined for symmetry but only onet is
    # used below (the periodic neighbours are taken via neighborx instead).
    onex = CartesianIndex((1,0,0,0))
    oney = CartesianIndex((0,1,0,0))
    onet = CartesianIndex((0,0,0,1))
    sx,sy,sz,st = size(Diabatic_other)
    N2 = reshape(N2,1,1,sz,st)
    # Periodic neighbour index (also reused for the y direction).
    neighborx(indx,sx) = mod1(indx+1,sx)
    # for ind in CartesianIndices((1:(sx - 1), 2:(sy - 1), 1:sz, 2:(st-1)))
    #### Budget residual with forward differences in time and space.
    @inbounds for it in 1:(st - 1), iz in 1:sz, iy in 1:sy, ix in 1:sx
        ind = CartesianIndex((ix,iy,iz,it))
        Diabatic_other[ind] = (B[ind+onet] - B[ind])/dt + U[ind]*(B[neighborx(ix,sx),iy,iz,it] -
            B[ind])/dx + V[ind]*(B[ix,neighborx(iy,sy),iz,it] - B[ind])/dy +
            W[ind]*N2[1,1,iz,it] - RAD_b[ind]
    end
    # Surface fluxes act only on the lowest model layer.
    @inbounds for it in 1:(st - 1), iy in 1:sy, ix in 1:sx
        Diabatic_other[ix,iy,1,it] = Diabatic_other[ix,iy,1,it] - Fs[ix,iy,it]/dz
    end
    # The last frame has no forward time difference: replicate the previous one.
    # for it in st:st, iz in 1:sz, iy in 1:(sy - 1), ix in 1:(sx - 1)
    @inbounds for ind in CartesianIndices((1:sx,1:sy,1:sz,st:st))
        Diabatic_other[ind] = Diabatic_other[ind-onet]
    end
    return Diabatic_other
end
| AvailablePotentialEnergyFramework | https://github.com/aramirezreyes/AvailablePotentialEnergyFramework.jl.git |
|
[
"MIT"
] | 0.1.0 | 1c56c6aa813337249e142d227a661747b0d287cd | code | 2357 |
"""
    cutborders!(array::Array{T,4}, smooth_time, position)

Trim the time dimension (last axis) to drop frames contaminated by a
centered moving average of width `smooth_time`. `position` selects the
side(s): `1` drops leading frames, `2` both ends, `3` trailing frames; any
other value returns the input untouched. Despite the `!` in the name, the
input is not mutated: a trimmed copy is returned.
"""
function cutborders!(array::Array{T,4},smooth_time,position) where T <: Real
    trim = div(smooth_time - 1, 2)
    if position == 1
        return array[:, :, :, (1 + trim):end]
    elseif position == 2
        return array[:, :, :, (1 + trim):(end - trim)]
    elseif position == 3
        return array[:, :, :, 1:(end - trim)]
    end
    return array
end
"""
    cutborders!(array::Array{T,3}, smooth_time, position)

Trim the time dimension (last axis) to drop frames contaminated by a
centered moving average of width `smooth_time`. `position` selects the
side(s): `1` drops leading frames, `2` both ends, `3` trailing frames; any
other value returns the input untouched. Despite the `!` in the name, the
input is not mutated: a trimmed copy is returned.
"""
function cutborders!(array::Array{T,3},smooth_time,position) where T <: Real
    trim = div(smooth_time - 1, 2)
    if position == 1
        return array[:, :, (1 + trim):end]
    elseif position == 2
        return array[:, :, (1 + trim):(end - trim)]
    elseif position == 3
        return array[:, :, 1:(end - trim)]
    end
    return array
end
"""
    cutborders!(array::Array{T,1}, smooth_time, position)

Trim a time series to drop samples contaminated by a centered moving
average of width `smooth_time`. `position` selects the side(s): `1` drops
leading samples, `2` both ends, `3` trailing samples; any other value
returns the input untouched. Despite the `!` in the name, the input is not
mutated: a trimmed copy is returned.
"""
function cutborders!(array::Array{T,1},smooth_time,position) where T <: Real
    trim = div(smooth_time - 1, 2)
    if position == 1
        return array[(1 + trim):end]
    elseif position == 2
        return array[(1 + trim):(end - trim)]
    elseif position == 3
        return array[1:(end - trim)]
    end
    return array
end
"""
    getsmoothdata!(U, V, W, Tv, ThetaV, RAD, Fs, smooth_x, smooth_y, smooth_time, position)

Smooth every field in place with `filter_array!`: a moving average with
circular boundaries in the horizontal (window `smooth_x`) and a replicated
boundary in time (window `smooth_time`).

NOTE(review): `smooth_y` is accepted but never used — the horizontal window
is `smooth_x` in both directions; confirm this is intentional.
"""
function getsmoothdata!(U,V, W, Tv, ThetaV, RAD, Fs, smooth_x,smooth_y,smooth_time,position)
    # Scratch buffers reused across fields; filter_array! writes the
    # filtered result back into each field itself.
    buf3d = similar(U)
    buf2d = similar(Fs)
    filter_array!(buf3d,U,smooth_x,smooth_time,position)
    filter_array!(buf3d,V,smooth_x,smooth_time,position)
    filter_array!(buf3d,W,smooth_x,smooth_time,position)
    filter_array!(buf3d,Tv,smooth_x,smooth_time,position)
    filter_array!(buf3d,ThetaV,smooth_x,smooth_time,position)
    filter_array!(buf3d,RAD,smooth_x,smooth_time,position)
    filter_array!(buf2d,Fs,smooth_x,smooth_time,position)
    #return U, V, W, Tv, ThetaV, RAD, Fs
end
"""
    getsmoothdata_nospace(U, V, W, Tv, ThetaV, RAD, Fs, smooth_x, smooth_y, smooth_time, position)

Filter every field in time only and return the (possibly time-trimmed)
results as a tuple.

NOTE(review): this calls `filter_array_nospace`, which is not defined in
this file — confirm it exists elsewhere in the package. `smooth_x` and
`smooth_y` are accepted but unused.
"""
function getsmoothdata_nospace(U::Array{T,4},V::Array{T,4}, W::Array{T,4}, Tv::Array{T,4}, ThetaV::Array{T,4}, RAD::Array{T,4}, Fs::Array{T,3}, smooth_x::Int,smooth_y::Int,smooth_time::Int,position::Int) where T <: Real
    U = filter_array_nospace(U,smooth_time,position)
    V = filter_array_nospace(V,smooth_time,position)
    W = filter_array_nospace(W,smooth_time,position)
    Tv = filter_array_nospace(Tv,smooth_time,position)
    ThetaV = filter_array_nospace(ThetaV,smooth_time,position)
    RAD = filter_array_nospace(RAD,smooth_time,position)
    Fs = filter_array_nospace(Fs,smooth_time,position)
    return U, V, W, Tv, ThetaV, RAD, Fs
end
| AvailablePotentialEnergyFramework | https://github.com/aramirezreyes/AvailablePotentialEnergyFramework.jl.git |
|
[
"MIT"
] | 0.1.0 | 1c56c6aa813337249e142d227a661747b0d287cd | code | 12262 | """
kernel_1d(window)
Create a kernel to perform a moving average filtering of a 1-d array. It will use window as windows size unless the given vaule is even, in which case it will add one to the given value before creating the kernel. This kernel has to be passed into imfilter or imfilter!
"""
function kernel_1d(window)
    # Enforce an odd window so the moving average stays centered.
    width = isodd(window) ? window : window + 1
    return centered(fill(1 / width, width))
end
"""
    kernel_4d(window_h, window_t, T::Type = Float64)

Create a separable kernel (via `kernelfactors`) to moving-average a 4-d
array along dimensions 1, 2 and 4 (dimension 3 gets an identity factor).
Even window widths are widened by one to stay odd; a width of `1` (or
`false`) yields an identity factor. Pass the result to
`imfilter`/`imfilter!`.
"""
function kernel_4d(window_h,window_t,T::Type=Float64)
    kernel_h = if (window_h == 1) | (window_h == false)
        centered([T(1.0)])
    else
        !isodd(window_h) && (window_h += 1 )
        # NOTE(review): unlike kernel_3d, this branch does not wrap the
        # averaging weights in `centered` — confirm kernelfactors centers
        # them as intended.
        ones(T,window_h)./(window_h)
    end
    # NOTE(review): window_t is made odd here AND again inside the branch
    # below; the second normalization is redundant but harmless.
    if !isodd(window_t)
        window_t += 1
    end
    kernel_t = if (window_t == 1) | (window_t == false)
        centered([T(1.0)])
    else
        !isodd(window_t) && (window_t += 1 )
        ones(T,window_t)./(window_t)
    end
    return kernelfactors(( kernel_h, kernel_h, centered([T(1.0)]) , kernel_t ))
end
"""
    kernel_3d(window_h, window_t, T::Type = Float64)

Create a separable kernel (via `kernelfactors`) to moving-average a 3-d
array along dimensions 1 and 2, with an identity factor along dimension 3.
Even window widths are widened by one to stay odd. Pass the result to
`imfilter`/`imfilter!`. `window_t` is accepted for API symmetry with
`kernel_4d` but does not affect the returned kernel (use `kernel_3d_t` for
the third axis).
"""
function kernel_3d(window_h,window_t,T::Type=Float64)
    width_h = isodd(window_h) ? window_h : window_h + 1
    avg = centered(fill(T(1) / width_h, width_h))
    return kernelfactors(( avg, avg, centered([T(1.0)]) ))
end
"""
    kernel_2d(window_h, T::Type = Float64)

Create a separable kernel (via `kernelfactors`) to moving-average a 2-d
array along dimensions 1 and 2. Even window widths are widened by one to
stay odd. Pass the result to `imfilter`/`imfilter!`.
"""
function kernel_2d(window_h,T::Type=Float64)
    width = isodd(window_h) ? window_h : window_h + 1
    weights = centered(fill(T(1) / width, width))
    return kernelfactors(( weights, weights ))
end
"""
    kernel_4d_t(window_t, T::Type = Float64)

Create a separable kernel (via `kernelfactors`) that moving-averages a 4-d
array along dimension 4 only, with identity factors along dimensions 1-3.
Even window widths are widened by one to stay odd. Pass the result to
`imfilter`/`imfilter!`.
"""
function kernel_4d_t(window_t,T::Type=Float64)
    width = isodd(window_t) ? window_t : window_t + 1
    identity_k = centered([T(1.0)])
    time_avg = centered(fill(T(1) / width, width))
    return kernelfactors(( identity_k, identity_k, identity_k, time_avg ))
end
"""
    kernel_3d_t(window_t, T::Type = Float64)

Create a separable kernel (via `kernelfactors`) that moving-averages a 3-d
array along dimension 3 only, with identity factors along dimensions 1-2.
Even window widths are widened by one to stay odd. Pass the result to
`imfilter`/`imfilter!`.
"""
function kernel_3d_t(window_t,T::Type=Float64)
    width = isodd(window_t) ? window_t : window_t + 1
    identity_k = centered([T(1.0)])
    time_avg = centered(fill(T(1) / width, width))
    return kernelfactors(( identity_k, identity_k, time_avg ))
end
"""
    filter_array_2!(buf, array, smooth_x, smooth_time, position)

Moving-average filter of a 4-d field (x, y, z, t): circular boundaries in
the horizontal (window `smooth_x`, doubly periodic domain) and a replicated
("symmetric") boundary in time (window `smooth_time`). `buf` is a scratch
array of the same size as `array`. `array` is filtered in place and
returned, unless `position == 2`, in which case a time-"inner"
(size-reduced) filtered copy is returned instead.

This calls `imfilter`/`imfilter!` from Images.jl under the hood.
"""
function filter_array_2!(buf :: Array{T,4},array::Array{T,4},smooth_x,smooth_time,position) where T <: Real
    sx,sy,sz,st = size(array)
    if position == 2
        # Fixed: this branch previously called the misspelled `kernel4d`
        # (UndefVarError at runtime) and discarded the result; the filtered
        # array is now returned.
        return imfilter(imfilter(array, kernel_4d(smooth_x,smooth_time),"circular"),
                        kernel_4d_t(smooth_time),"inner")
    else
        # Horizontal smoothing into the scratch array, one (x, y) slice at
        # a time. (Leftover `@info` debug lines were removed.)
        @inbounds for t in 1:st
            @Threads.threads for z in 1:sz
                imfilter!(view(buf,:,:,z,t),array[:,:,z,t], kernel_2d(smooth_x),"circular")
            end
        end
        # Temporal smoothing, column by column, written back into `array`.
        @inbounds for z in 1:sz, y in 1:sy
            @Threads.threads for x in 1:sx
                array[x,y,z,:] .= imfilter(buf[x,y,z,:], kernel_1d(smooth_time),"symmetric")
            end
        end
    end
    return array
end
"""
    filter_array_2!(array::Array{T,3}, smooth_x, smooth_time, position)

Moving-average filter of a 3-d field (x, y, t): circular boundaries in the
horizontal (window `smooth_x`) and a replicated ("symmetric") boundary in
time (window `smooth_time`). `array` is filtered in place and returned,
unless `position == 2`, in which case a time-"inner" (size-reduced)
filtered copy is returned instead.

This calls `imfilter`/`imfilter!` from Images.jl under the hood.
"""
function filter_array_2!(array::Array{T,3},smooth_x,smooth_time,position) where T <: Real
    filtered = similar(array)
    sx,sy,st = size(array)
    if position == 2
        # Fixed: this branch previously applied the misspelled 4-d kernels
        # (`kernel4d`/`kernel4d_t`) to a 3-d array and discarded the
        # result; it now uses the 3-d kernels and returns the output.
        return imfilter(imfilter(array, kernel_3d(smooth_x,smooth_time),"circular"),
                        kernel_3d_t(smooth_time),"inner")
    else
        # Horizontal smoothing into the scratch array, slice by slice.
        # Fixed: `imfilter(filtered, array[:,:,t], ...)` was an invalid
        # call signature; the in-place `imfilter!` is used instead.
        for t in 1:st
            imfilter!(view(filtered,:,:,t), array[:,:,t], kernel_2d(smooth_x),"circular")
        end
        # Temporal smoothing written back into `array`.
        for y in 1:sy, x in 1:sx
            array[x,y,:] = imfilter(filtered[x,y,:], kernel_1d(smooth_time),"symmetric")
        end
    end
    return array
end
"""
filter_array!(buffer,array,smooth_x,smooth_t,position)
Filters the input array _in-place_ using a moving mean. In the first two dimensions the window width is smooth_x and the border is treated as circular (for doubly periodic domains). In the time dimension the width is smooth_t and the border value is replicated.
This function calls under the hood the imfilter function of Images.jl
The first argument must be a buffer of the same size of array.
"""
function filter_array!(buf::Array{T,4},array::Array{T,4},smooth_x,smooth_time,position) where T <: Real
    if position == 2
        error("Filter_array: Inner array is not implemented yet")
    else
        # Spatial pass: the first two factors of the separable 4-D kernel, with a
        # circular (periodic) boundary, written into the caller-supplied buffer.
        imfilter!(buf,array,kernel_4d(smooth_x,smooth_time,T)[1:2],"circular")
        # Temporal pass: the time factor of the kernel with a symmetric
        # (replicated) border, written back into `array` in place.
        imfilter!(array,buf,(kernel_4d_t(smooth_time,T)[4],),"symmetric")
    end
    # return filtered
end
# 3-D (x, y, t) counterpart of filter_array! above: spatial smoothing with a
# circular border into `buf`, then temporal smoothing back into `array` in place.
function filter_array!(buf::Array{T,3},array::Array{T,3},smooth_x,smooth_time,position) where T <: Real
    if position==2
        error("Filter_array: Inner array is not implemented yet")
    else
        # First two (spatial) factors of the separable 3-D kernel.
        imfilter!(buf,array, kernel_3d(smooth_x,smooth_time,T)[1:2],"circular")
        # Third (time) factor with a replicated border.
        imfilter!(array,buf,(kernel_3d_t(smooth_time,T)[3],),"symmetric")
    end
end
"""
filter_array(array,smooth_x,smooth_t,position)
Filters the input array using a moving mean and returns the smoothed result. In the first two dimensions the window width is smooth_x and the border is treated as circular (for doubly periodic domains). In the time dimension the width is smooth_t and the border value is replicated.
This function calls under the hood the imfilter function of Images.jl
The first argument must be a buffer of the same size of array.
"""
function filter_array(array::Array{T,4},smooth_x,smooth_time,position = 1) where T <: Real
    if position == 2
        # "Inner" mode: the temporal filter keeps only the interior of the time
        # axis, so the buffers are shorter by one tail on each side.
        asize = size(array)
        tail_size = (asize[4]-1)÷2
        buf1 = Array{T,4}(undef,asize[1],asize[2],asize[3],asize[4] - 2*tail_size)
        buf2 = Array{T,4}(undef,asize[1],asize[2],asize[3],asize[4] - 2*tail_size)
        buf_axes = axes(buf1)
        # Offset the destination so its time axis lines up with the interior of
        # the source; temporal pass first, then the spatial (circular) pass.
        # NOTE(review): this branch uses kernel_4d_t(...)[3] while the other
        # branch uses [4] — confirm which index is the time factor.
        imfilter!(OffsetArray(buf1,buf_axes[1],buf_axes[2],buf_axes[3],buf_axes[4].+tail_size),array,(kernel_4d_t(smooth_time,T)[3],),Inner())
        imfilter!(buf2,buf1, kernel_4d(smooth_x,smooth_time,T)[1:2],"circular")
    else
        # Default mode: spatial pass (circular border) then temporal pass
        # (symmetric/replicated border); the input array is not modified.
        buf1 = similar(array)
        buf2 = similar(array)
        imfilter!(buf1,array,kernel_4d(smooth_x,smooth_time,T)[1:2],"circular")
        imfilter!(buf2,buf1,(kernel_4d_t(smooth_time,T)[4],),"symmetric")
    end
    return buf2
end
"""
filter_array(array,smooth_x,smooth_t,position)
Filters the input array using a moving mean and returns the smoothed result. In the first two dimensions the window width is smooth_x and the border is treated as circular (for doubly periodic domains). In the time dimension the width is smooth_t and the border value is replicated.
This function calls under the hood the imfilter function of Images.jl
The first argument must be a buffer of the same size of array.
"""
function filter_array(array::Array{T,3},smooth_x,smooth_time,position = 1) where T <: Real
    # The temporal window must be odd so the kernel is centered.
    if !isodd(smooth_time)
        smooth_time += 1
    end
    if position==2
        # "Inner" mode: keep only the interior of the time axis; buffers are
        # shorter by one tail on each side.
        asize = size(array)
        tail_size = (asize[3]-1)÷2
        buf1 = Array{T,3}(undef,asize[1],asize[2],asize[3] - 2*tail_size)
        buf2 = Array{T,3}(undef,asize[1],asize[2],asize[3] - 2*tail_size)
        buf_axes = axes(buf1)
        # Offset destination aligns with the interior of the source time axis.
        imfilter!(OffsetArray(buf1,buf_axes[1],buf_axes[2],buf_axes[3].+tail_size),array,(kernel_3d_t(smooth_time,T)[3],),Inner())
        imfilter!(buf2,buf1, kernel_3d(smooth_x,smooth_time,T)[1:2],"circular")
    else
        # Default mode: spatial pass (circular) then temporal pass (symmetric).
        buf1 = similar(array)
        buf2 = similar(array)
        imfilter!(buf1,array, kernel_3d(smooth_x,smooth_time,T)[1:2],"circular")
        imfilter!(buf2,buf1,(kernel_3d_t(smooth_time,T)[3],),"symmetric")
    end
    return buf2
end
"""
filter_array_nospace(array,smooth_t,position)
Filters the input array using a moving mean along the fourth dimension. In the 4th dimension the width is smooth_t and the border value is replicated except if position = 2, in which case it only takes the inner part of the smoothed array.
This function calls under the hood the imfilter function of Images.jl
"""
function filter_array_nospace(array::Array{T,4},smooth_time,position = 1) where T <: Real
    # Time-only smoothing of a 4-D field. position == 2 keeps only the interior
    # of the smoothed axis; any other value replicates the border.
    border = position == 2 ? "inner" : "replicate"
    return imfilter(array, kernel_4d_t(smooth_time), border)
end
"""
filter_array_nospace(array,smooth_t,position)
Filters the input array using a moving mean along the third dimension. In the 3rd dimension the width is smooth_t and the border value is replicated except if position = 2, in which case it only takes the inner part of the smoothed array.
This function calls under the hood the imfilter function of Images.jl
"""
function filter_array_nospace(array::Array{T,3},smooth_time,position = 1) where T <: Real
    # Time-only smoothing of a 3-D field; see the 4-D method above.
    border = position == 2 ? "inner" : "replicate"
    return imfilter(array, kernel_3d_t(smooth_time), border)
end
"""
    filter_array_time(array,window,position = 1)
Filters the input, 1-d array. The border value is replicated except if position = 2, in which case it only takes the inner part of the smoothed array.
This function calls under the hood the imfilter function of Images.jl
"""
function filter_array_time(array::Array{T,1},window,position = 1) where T <: Real
    # Centered moving-mean kernel of the requested width.
    kern = centered(ones(window) ./ window)
    # position == 2 keeps only the interior; otherwise the border is replicated.
    border = position == 2 ? "inner" : "symmetric"
    return imfilter(array, kern, border)
end
| AvailablePotentialEnergyFramework | https://github.com/aramirezreyes/AvailablePotentialEnergyFramework.jl.git |
|
[
"MIT"
] | 0.1.0 | 1c56c6aa813337249e142d227a661747b0d287cd | code | 17452 | abstract type CycloneDetectionAlgorithm end
# Singleton marker types used for dispatch to select a cyclone-detection
# algorithm in findcyclonecenters!/detect_cyclones!.
struct PressureMinima <: CycloneDetectionAlgorithm end  # detect cyclones as minima of the filtered field
struct VorticityMaxima <: CycloneDetectionAlgorithm end # detect cyclones as maxima of the filtered field
"""
smooth_and_mask(surf_pres_anomaly, threshold = -9)
Takes a 2d array and smooths it using a rolling median filter followed by a Gaussian blur.
It then returns the filtered array with every value not below the threshold replaced by the sentinel -9999.
"""
function smooth_and_mask(surf_pres_anomaly,threshold=-9, resolution = 2000; target_lengthscale = 30000)
    # Allocating front-end: create the two work buffers and delegate to the
    # in-place version, returning the masked result.
    work = similar(surf_pres_anomaly)
    result = similar(surf_pres_anomaly)
    smooth_and_mask!(work, result, surf_pres_anomaly, threshold, resolution; target_lengthscale)
    return result
end
"""
smooth_and_mask!(buf,surf_pres_anomaly, threshold = -9)
Takes a 2d array and smooths it using a rolling median filter followed by a Gaussian blur, storing intermediates in the provided buffers.
It then returns the filtered array with every value not below the threshold replaced by the sentinel -9999.
"""
function smooth_and_mask!(buf,buf2,target_variable,threshold=-9, resolution = 2000; target_lengthscale = 30000)
    # Window length (grid points) on the order of the cyclone-eye lengthscale.
    windowsize = target_lengthscale ÷ (2*resolution)
    # mapwindow! requires an odd window width.
    iseven(windowsize) && (windowsize += 1)
    # Rolling median (periodic border) into buf, then a Gaussian blur into buf2.
    mapwindow!(median!, buf, target_variable, [windowsize, windowsize], border="circular");
    imfilter!(buf2, buf, Kernel.gaussian(3), "circular");
    # Mask: replace every value not below the threshold with the sentinel -9999.
    @inbounds for idx in eachindex(buf2)
        if buf2[idx] >= threshold
            buf2[idx] = -9999
        end
    end
    return buf2
end
"""
    findcyclonecenters(cyclone_detection_algorithm,target_variable,detection_threshold;grid_spacing=2000,target_lengthscale=30000)
Takes a surface pressure anomaly array surf_pres_anomaly[x,y] = surf_pres[x,y] .- mean(surf_pres,dims(1,2))
a detection threshold for the anomaly, and return an array of tuples (x,y)
where each tuple represents the centers of cyclones identified as the minima of the anomaly.
"""
function findcyclonecenters(cyclone_detection_algorithm,target_variable,detection_threshold;grid_spacing=2000, target_lengthscale = 30000)
    # Allocating front-end: build the work buffers and delegate to the in-place
    # variant, which dispatches on the detection algorithm marker type.
    work = similar(target_variable)
    masked = similar(target_variable)
    return findcyclonecenters!(cyclone_detection_algorithm, work, masked, target_variable,
                               detection_threshold; grid_spacing, target_lengthscale)
end
"""
    findcyclonecenters!(::VorticityMaxima,buf1,buf2,target_variable,detection_threshold;grid_spacing=2000,target_lengthscale=30000)
Takes a surface pressure anomaly array surf_pres_anomaly[x,y] = surf_pres[x,y] .- mean(surf_pres,dims(1,2))
a detection threshold for the anomaly, and return an array of tuples (x,y)
where each tuple represents the centers of cyclones identified as the minima of the anomaly.
"""
function findcyclonecenters!(::VorticityMaxima,buf1,buf2,target_variable,detection_threshold;grid_spacing=2000, target_lengthscale = 30000)
    # Smooth the field and replace values >= detection_threshold with -9999.
    # NOTE(review): that mask keeps values *below* the threshold, which seems
    # inverted for detecting vorticity *maxima* — confirm the intended sign of
    # detection_threshold for this algorithm.
    filtered_variable = smooth_and_mask!(buf1,buf2,target_variable,detection_threshold,grid_spacing; target_lengthscale);
    # Local maxima over the first two (spatial) dimensions, excluding the border.
    peaks = findlocalmaxima(filtered_variable,[1,2],false);
    return peaks
end
"""
    findcyclonecenters!(::PressureMinima,buf1,buf2,target_variable,detection_threshold;grid_spacing=2000,target_lengthscale=30000)
Takes a surface pressure anomaly array surf_pres_anomaly[x,y] = surf_pres[x,y] .- mean(surf_pres,dims(1,2))
a detection threshold for the anomaly, and return an array of tuples (x,y)
where each tuple represents the centers of cyclones identified as the minima of the anomaly.
"""
function findcyclonecenters!(::PressureMinima,buf1,buf2,target_variable,detection_threshold;grid_spacing=2000, target_lengthscale = 30000)
    # Smooth the field and replace values >= detection_threshold with -9999,
    # leaving only the strong negative (pressure-anomaly) regions.
    filtered_variable = smooth_and_mask!(buf1,buf2,target_variable,detection_threshold,grid_spacing; target_lengthscale);
    # Local minima over the first two (spatial) dimensions, excluding the border.
    peaks = findlocalminima(filtered_variable,[1,2],false);
    return peaks
end
"""
neighbours_2d(arraysize,indicesCartesian)
Compute de neighboring points of an index in a 2-d array considering periodic boundaries
"""
function neighbours_2d(arraysize,indicesCartesian)
    # Periodic predecessor/successor of an index along one dimension.
    prev_periodic(i, n) = i == 1 ? n : i - 1
    next_periodic(i, n) = i == n ? one(i) : i + 1
    xi = indicesCartesian[1]
    yi = indicesCartesian[2]
    nx = arraysize[1]
    ny = arraysize[2]
    xcands = (prev_periodic(xi, nx), xi, next_periodic(xi, nx))
    ycands = (prev_periodic(yi, ny), yi, next_periodic(yi, ny))
    # 3x3 matrix of CartesianIndex covering the (wrapped) neighbourhood,
    # including the point itself.
    return CartesianIndex.(Iterators.product(xcands, ycands))
end
"""
function detect_cyclones!(buf1, buf2,pressure_anomaly,pressure_threshold,resolution)
receive 2 buffers and a pressure anomaly and returns a segmented image with the cyclones
"""
function detect_cyclones(cyclone_detection_algorithm,target_variable,detection_threshold,resolution; target_lengthscale = 30000)
    # Allocating front-end: build the two work buffers and delegate to the
    # in-place version, returning (centers_and_labels, segmented_cyclones).
    work = similar(target_variable)
    masked = similar(target_variable)
    return detect_cyclones!(cyclone_detection_algorithm, work, masked, target_variable,
                            detection_threshold, resolution; target_lengthscale)
end
"""
function detect_cyclones!(buf1, buf2,pressure_anomaly,pressure_threshold,resolution)
receive 2 buffers and a pressure anomaly and returns a segmented image with the cyclones
"""
function detect_cyclones!(cyclone_detection_algorithm,buf1,buf2,target_variable,detection_threshold,resolution; target_lengthscale = 30000)
    # Closure that yields the periodic 2-D neighbourhood of a point; used by the
    # region-growing segmentation below.
    neighbourhood_gen(arraysize) = point -> AvailablePotentialEnergyFramework.neighbours_2d(arraysize,point)
    # Candidate cyclone centers; after this call buf2 holds the masked field
    # (background set to the sentinel -9999).
    centers = findcyclonecenters!(cyclone_detection_algorithm,buf1,buf2,target_variable,detection_threshold; grid_spacing = resolution, target_lengthscale)#buf2 is the masked array
    #@info centers
    if length(centers) == 0
        return (nothing,nothing)
    end
    # Seed each center with its own label; label 1000 marks the background,
    # seeded at any sentinel (-9999) pixel.
    centers_and_labels = [(centers[i],i) for i in 1:length(centers)]
    #mask = pres_anomaly .<= pressure_threshold
    push!(centers_and_labels,(findfirst(==(-9999),buf2),1000))
    cyclones = seeded_region_growing(buf2,centers_and_labels,neighbourhood_gen(size(target_variable))) #Many allocations? this may be the culprit
    return (centers_and_labels,cyclones)
end
"""
isinteracting(adjacencyMatrix :: SparseMatrixCSC,regionnumber)
Computes the adjacency of identified cyclones and returns true if the cylone-number is adjacent to a region other than the background.
"""
function isinteracting(adjacencyMatrix :: SparseMatrixCSC{T,N},regionnumber) where {T,N}
    # The last row/column of the adjacency matrix is the background segment;
    # a weight of 1 in any earlier column means this region touches another
    # detected region.
    nregions = adjacencyMatrix.m - 1
    return any(adjacencyMatrix[regionnumber, j] == 1 for j in 1:nregions)
end
"""
isinteracting(cyclones :: SegmentedImage,regionnumber)
Computes the adjacency of identified cyclones and returns true if the cylone-number is adjacent to a region other than the background.
"""
function isinteracting(cyclones :: SegmentedImage,regionnumber)
    # Build the region-adjacency graph (edge weight 1 between touching regions)
    # and delegate the same last-column-is-background scan as the sparse-matrix
    # method above.
    adjacency, vert_map = region_adjacency_graph(cyclones,(i,j)->1)
    @inbounds for i in 1:(adjacency.weights.m - 1)
        adjacency.weights[regionnumber,i] == 1 ? (return true) : nothing
    end
    return false
end
"""
    add_allcyclones(array,segmentedcyclones,cyclonescenters;maskcyclones = true)
Shift every non-interacting detected cyclone so its center lands on the domain center and accumulate the (optionally masked) field over all of them.
Returns the tuple (cyclonecount, accumulated_array).
It receives the field to composite, the cyclones as a SegmentedImage, and the centers of the cyclones.
function add_allcyclones(array,segmentedcyclones,cyclonescenters;maskcyclones = true)
    # Allocating front-end: zero-initialize the accumulator, create the two
    # scratch buffers and delegate to the in-place version.
    accumulated = zeros(eltype(array), size(array))
    scratch_shift = similar(accumulated)
    scratch_mask = similar(accumulated)
    ncyclones = add_allcyclones!(accumulated, scratch_shift, scratch_mask, array,
                                 segmentedcyclones, cyclonescenters; maskcyclones)
    return (ncyclones, accumulated)
end
"""
    add_allcyclones!(addition,buf1,buf2,array,segmentedcyclones,cyclonescenters;maskcyclones = true)
Shift every non-interacting detected cyclone so its center lands on the domain center and accumulate the (optionally masked) field into `addition` in place.
Returns the number of cyclones accumulated.
It receives the accumulator, two scratch buffers of the same size, the field, the cyclones as a SegmentedImage, and the centers of the cyclones.
function add_allcyclones!(addition,buf1,buf2,array,segmentedcyclones,cyclonescenters;maskcyclones = true)
    # BUGFIX: the original constructed the DimensionMismatch but never threw it,
    # so mismatched buffers went undetected.
    if !(size(addition) == size(buf1) == size(array))
        throw(DimensionMismatch("Addition, buffer and array must all have the same dimensions"))
    end
    center = size(array)[1:2] .÷ 2
    #@info center
    # Region adjacency graph; used to skip cyclones that touch another cyclone.
    adjacency, vert_map = region_adjacency_graph(segmentedcyclones, (i,j)->1)
    cyclonecount = 0
    if maskcyclones
        # Hoisted out of the loop: labels_map is loop-invariant.
        labelsmap = labels_map(segmentedcyclones)
        @inbounds for cyclone in 1:(length(segmentedcyclones.segment_labels)-1)
            # buf2 holds the field restricted to this cyclone's segment.
            @inbounds for ind in CartesianIndices(array)
                if labelsmap[ind[1],ind[2]] == cyclone
                    buf2[ind] = array[ind]
                else
                    buf2[ind] = 0.0
                end
            end
            if !isinteracting(adjacency.weights,cyclone)
                cyclonecount += 1
                # Shift the cyclone onto the domain center and accumulate.
                addition .+= shifter!(buf1,buf2,center,cyclonescenters[cyclone][1])
            end
        end
    else #no cyclone masking
        @inbounds for cyclone in 1:(length(segmentedcyclones.segment_labels)-1)
            if !isinteracting(adjacency.weights,cyclone)
                cyclonecount += 1
                addition .+= shifter!(buf1,array,center,cyclonescenters[cyclone][1])
            else
                @debug "Cyclone is too close to another one"
            end
        end
    end
    return cyclonecount
end
"""
isindexindistancebin(binlimits,index,center = (0,0),gridspacing=1) = (binlimits[1] < distance(index,center,gridspacing) <= binlimits[2]) ? true : false
computes the distance of one index to the origin and returns true if that distance is inside a bin
"""
# True when `index` lies strictly farther than binlimits[1] and no farther than
# binlimits[2] from `center` (distances scaled by `gridspacing`).
isindexindistancebin(binlimits,index,center,gridspacing=1) = binlimits[1] < distance(index,center,gridspacing) <= binlimits[2]
"""
averageallindistance(radiusbin,array :: Array{T,2},mask,center,gridspacing = 1)
Create an average of the quantity in array at all the points located between radiusbin[1] and radiusbin[2] from a center.
The points should be masked by a boolean array. It assumes a uniform gridspacing.
"""
function averageallindistance(radiusbin,array :: Array{T,2},center,gridspacing = one(T)) where T
    # Mean of all values whose distance from `center` falls inside `radiusbin`;
    # returns zero(T) when the annulus contains no grid points.
    total = zero(T)
    npoints = 0
    @inbounds for idx in CartesianIndices(array)
        if isindexindistancebin(radiusbin,idx,center,gridspacing)
            npoints += 1
            total += array[idx]
        end
    end
    return iszero(npoints) ? total : total/npoints
end
"""
averageallindistance(radiusbin,array :: Array{T,3},mask,center,gridspacing = 1)
Create an average of the quantity in array at all the points located between radiusbin[1] and radiusbin[2] from a center.
The points should be masked by a boolean array. It assumes a uniform gridspacing.
"""
function averageallindistance(radiusbin,array :: Array{T,3},center,gridspacing = one(T) ) where T
    # BUGFIX: the default was `one{T}`, which is not valid Julia (curly braces
    # apply type parameters, not a call) and raised an error whenever the
    # default gridspacing was used; it must be `one(T)`.
    sx,sy,sz = size(array)
    # Per-level average over the annulus, delegated to the in-place version.
    average = zeros(T,sz)
    averageallindistance!(average,radiusbin,array,center,gridspacing)
    return average
end
"""
    averageallindistance!(average,radiusbin,array :: Array{T,3},center,gridspacing = 1)
Create an average of the quantity in array at all the points located between radiusbin[1] and radiusbin[2] from a center.
The points should be masked by a boolean array. It assumes a uniform gridspacing.
"""
function averageallindistance!(average,radiusbin,array :: Array{T,3},center,gridspacing = 1) where T
    # Accumulate, level by level, every column whose horizontal distance from
    # `center` falls inside `radiusbin`, then normalize by the point count.
    nx,ny,nz = size(array)
    npoints = 0
    @inbounds for idx in CartesianIndices((1:nx,1:ny))
        isindexindistancebin(radiusbin,idx,center,gridspacing) || continue
        npoints += 1
        for level in 1:nz
            average[level] += array[idx[1],idx[2],level]
        end
    end
    # Leave the accumulator untouched (all zeros) when the annulus is empty.
    if npoints > 0
        average .= average ./ npoints
    end
    return average
end
"""
azimuthalaverage_allcyclones(radius_bins,array :: Array{T,3},segmentedcyclones,cyclonescenters,gridspacing)
Compute the azimuthal average of some quantity around a center. Repeats the process and averages about all the tropical cyclones detected on the array.
It receives an array with the radius bins to use,the field to average, called `array`, each cyclone as a SegmentedImage,the centers of the cyclones and the gridspacing.
"""
function azimuthalaverage_allcyclones(radius_bins,array :: Array{T,3},segmentedcyclones,cyclonescenters,gridspacing) where T
    # Azimuthal average (per vertical level, per radius bin) composited over all
    # non-interacting cyclones.
    # The adjacency graph is computed once and its weight matrix reused below;
    # the original recomputed the whole graph inside isinteracting for every
    # cyclone and left `labelsmap`/`adjacencymatrix` unused.
    G, vert_map = region_adjacency_graph(segmentedcyclones, (i,j)->1)
    adjacencymatrix = G.weights
    average = zeros(size(array,3),length(radius_bins)-1)
    cyclonecount = 0
    for cyclone in 1:(length(segmentedcyclones.segment_labels)-1)
        if !isinteracting(adjacencymatrix,cyclone)
            cyclonecount += 1
            for bin in 1:(length(radius_bins) - 1)
                average[:,bin] .+= averageallindistance((radius_bins[bin],radius_bins[bin+1]),array,cyclonescenters[cyclone][1],gridspacing)
            end
        end
    end
    # Normalize by the number of cyclones; all-zero result when none qualified.
    if !iszero(cyclonecount)
        return average./cyclonecount
    else
        return average
    end
end
"""
azimuthalaverage_allcyclones(radius_bins,array :: Array{T,3},segmentedcyclones,cyclonescenters,gridspacing)
Compute the azimuthal average of some quantity around a center. Repeats the process and averages about all the tropical cyclones detected on the array.
It receives an array with the radius bins to use,the field to average, called `array`, each cyclone as a SegmentedImage,the centers of the cyclones and the gridspacing.
"""
function azimuthalaverage_allcyclones(radius_bins,array :: Array{T,2},segmentedcyclones,cyclonescenters,gridspacing) where T
    # 2-D counterpart of the 3-D method above: one value per radius bin.
    average = zeros(length(radius_bins)-1)
    # Compute the adjacency graph once and reuse its weight matrix; the original
    # recomputed the graph inside isinteracting for every cyclone and left
    # `labelsmap`/`adjacencymatrix` unused.
    G, vert_map = region_adjacency_graph(segmentedcyclones, (i,j)->1)
    adjacencymatrix = G.weights
    cyclonecount = 0
    for cyclone in 1:(length(segmentedcyclones.segment_labels)-1)
        if !isinteracting(adjacencymatrix,cyclone)
            cyclonecount += 1
            for bin in 1:(length(radius_bins) - 1)
                average[bin] += averageallindistance((radius_bins[bin],radius_bins[bin+1]),array,cyclonescenters[cyclone][1],gridspacing)
            end
        end
    end
    # Normalize by the number of cyclones; all-zero result when none qualified.
    if !iszero(cyclonecount)
        return average./cyclonecount
    else
        return average
    end
end
"""
shifter(array,domain_center,peak)
Returns an array in which a pressure perturbation center is displaced to the center of the domain using circshift.
Using it may assume periodic domain.
Receives and SAM 3D+time or 2D+time array and two tuples, the (x,y) indices of the domain center,
and the (x,y) indices of the location of the pressure perturbation peak.
"""
function shifter(array,domain_center,peak)
    # Periodic shift that moves the feature located at `peak` onto
    # `domain_center`; supports 2-D and 3-D inputs (the third dimension, if
    # present, is left unshifted).
    offset = (domain_center[1]-peak[1], domain_center[2]-peak[2])
    if ndims(array) == 2
        return circshift(array, offset)
    elseif ndims(array) == 3
        return circshift(array, (offset[1], offset[2], 0))
    end
end
"""
    shifter!(dest,array,domain_center,peak)
Stores in `dest` an array in which a pressure perturbation center is displaced to the center of the domain using circshift.
Using it may assume periodic domain.
Receives and SAM 3D+time or 2D+time array and two tuples, the (x,y) indices of the domain center,
and the (x,y) indices of the location of the pressure perturbation peak.
"""
function shifter!(dest,array,domain_center,peak)
    # In-place periodic shift moving the feature at `peak` onto `domain_center`.
    offset = (domain_center[1]-peak[1], domain_center[2]-peak[2])
    return circshift!(dest, array, offset)
end
"""
timemean_nofalseframe(input)
Takes a 3-d or 3-d array and computes the average along the third or fourth dimension, skipping the slices input[:,:,:,i] for which the maxiumum value is 0.0
"""
function timemean_nofalseframe(input::Array{T,4}) where {T<:Real}
    # Mean over the 4th (time) dimension, skipping "false" frames whose maximum
    # is exactly zero.
    sx,sy,sz,st = size(input)
    accum = zeros(T,sx,sy,sz)
    # BUGFIX: the counter started at 1 and was incremented once per true frame,
    # so the sum of n frames was divided by n+1. Start at 0 instead.
    trueframes = 0
    for t in 1:st
        if maximum(view(input,:,:,:,t)) != 0.0
            accum[:,:,:] .+= @views input[:,:,:,t]
            trueframes += 1
        end
    end
    # Guard against an all-false input: return zeros instead of dividing by 0.
    return iszero(trueframes) ? accum : accum .= accum ./ trueframes
end
function timemean_nofalseframe(input::Array{T,3}) where {T<:Real}
    # Mean over the 3rd (time) dimension, skipping "false" frames whose maximum
    # is exactly zero.
    sx,sy,st = size(input)
    accum = zeros(T,sx,sy)
    # BUGFIX 1: the loop iterated over 1:size(input,4), which is always 1 for a
    # 3-D array, so only the first frame was ever visited.
    # BUGFIX 2: the counter started at 1, dividing the sum of n frames by n+1.
    trueframes = 0
    for t in 1:st
        if maximum(view(input,:,:,t)) != 0.0
            accum[:,:] .+= @views input[:,:,t]
            trueframes += 1
        end
    end
    # Guard against an all-false input: return zeros instead of dividing by 0.
    return iszero(trueframes) ? accum : accum .= accum ./ trueframes
end
function removefalseframes(input::Array{T,4},peaktimes) where {T<:Real}
    # Copy every frame whose maximum is non-zero into a new array whose time
    # dimension has length(unique(peaktimes)) slots, preserving frame order.
    sizet = length(unique(peaktimes))
    out = zeros(T,size(input,1),size(input,2),size(input,3),sizet)
    # BUGFIX: the original used Threads.@threads while incrementing the shared
    # `trueframes` counter — a data race that could also scramble or overwrite
    # frames. The copy must be sequential to keep the destination index correct.
    trueframes = 1
    for t in 1:size(input,4)
        if maximum(view(input,:,:,:,t)) != 0
            @views out[:,:,:,trueframes] .= input[:,:,:,t]
            trueframes += 1
        end
    end
    return out
end
function removefalseframes(input::Array{T,3},peaktimes) where {T<:Real}
    # 3-D counterpart of the method above: drop all-zero time slices.
    sizet = length(unique(peaktimes))
    out = zeros(T,size(input,1),size(input,2),sizet)
    # BUGFIX: the original used Threads.@threads while incrementing the shared
    # `trueframes` counter — a data race; the copy must be sequential.
    trueframes = 1
    for t in 1:size(input,3)
        if maximum(view(input,:,:,t)) != 0
            @views out[:,:,trueframes] .= input[:,:,t]
            trueframes += 1
        end
    end
    return out
end
| AvailablePotentialEnergyFramework | https://github.com/aramirezreyes/AvailablePotentialEnergyFramework.jl.git |
|
[
"MIT"
] | 0.1.0 | 1c56c6aa813337249e142d227a661747b0d287cd | code | 7766 | """
smooth_vars_and_write_to_netcdf!(output_file,input_file,vars_to_smooth,window_h,window_t)
Take a netcdf file, a list of variables and two integers and performs a moving mean smoothing of these variables. Write the smoothed fields into a netcdf file called output_file
"""
function smooth_vars_and_write_to_netcdf!(output_file,input_file,vars_to_smooth,window_h,window_t)
    ds_orig = Dataset(input_file)
    # New dataset; the provenance of the smoothing is recorded in the history
    # attribute.
    ds_new = Dataset(output_file,"c", attrib = OrderedDict(
        "history" => "Filtered data from file: $input_file, using $window_h points in the space dimensions and $window_t points in the time dimension"
    ))
    # Copy every dimension and its coordinate variable verbatim.
    for (dim,dim_size) in ds_orig.dim
        @info "Writing $dim coordinate in netcdf file"
        flush(stdout)
        ds_new.dim[dim] = dim_size # unlimited dimension
        v = ds_orig[dim]
        create_var = defVar(ds_new,dim, eltype(v.var), dimnames(v),attrib = v.var.attrib)
        create_var[:] = v[:]
    end
    # Smooth each requested variable with filter_array and write it out with the
    # original attributes and dimension names.
    for current_var in vars_to_smooth
        @info "Processing $current_var"
        flush(stdout)
        v = ds_orig[current_var]
        create_var = defVar(ds_new,current_var, eltype(v.var), dimnames(v),attrib = v.var.attrib)
        create_var[:] = filter_array(v[:],window_h,window_t)
        @info "Finished processing $current_var"
        flush(stdout)
    end
    # NOTE(review): consider wrapping in try/finally so the datasets are closed
    # on error as well.
    close(ds_new)
    close(ds_orig)
end
# NetCDF dimension names (in storage order) for each APE-budget variable
# written by create_APE_netcdf/set_netcdf_var!.
const dimensions_of_APE_variables = Dict(
    "rho0"          => ("z","t"),
    "N2"            => ("z","t"),
    "B"             => ("x","y","z","t"),
    "QCONVEC"       => ("x","y","z","t"),
    "QRAD"          => ("x","y","z","t"),
    "APE"           => ("t",),
    "KE"            => ("t",),
    "APERAD"        => ("t",),
    "APEDIA"        => ("t",),
    "APEWN2"        => ("t",),
    "APEUb2"        => ("t",),
    "APEVb2"        => ("t",),
    "APErate"       => ("t",),
    "APESURF"       => ("t",)
)
"""
Create NetCDF for storing ape budget variables
"""
# Create the NetCDF file that will hold the APE-budget variables: declares the
# x/y/z/time dimensions from `var_size`, defines one deflated variable per
# budget term, and pre-fills the time coordinate.
function create_APE_netcdf(filename,var_size)
    Dataset(filename,"c") do ds
        # Dimensions taken from the model grid; time is unlimited.
        ds.dim["x"] = var_size[1]
        ds.dim["y"] = var_size[2]
        ds.dim["z"] = var_size[3]
        ds.dim["time"] = Inf # unlimited dimension
        # Coordinate variables.
        ncx = defVar(ds,"x", Float32, ("x",), deflatelevel=1, attrib = OrderedDict(
            "units"                     => "m",
        ))
        ncy = defVar(ds,"y", Float32, ("y",), deflatelevel=1, attrib = OrderedDict(
            "units"                     => "m",
        ))
        ncz = defVar(ds,"z", Float32, ("z",), deflatelevel=1, attrib = OrderedDict(
            "units"                     => "m",
            "long_name"                 => "height",
        ))
        nctime = defVar(ds,"time", Float32, ("time",), deflatelevel=1, attrib = OrderedDict(
            "units"                     => "d",
            "long_name"                 => "time",
        ))
        # Scalar (time-only) budget terms.
        ncAPE = defVar(ds,"APE", Float32, ("time",), deflatelevel=1, attrib = OrderedDict(
            "long_name"                 => "Available Potential Energy",
            "units"                     => "J/m^2 ",
        ))
        ncKE = defVar(ds,"KE", Float32, ("time",), deflatelevel=1, attrib = OrderedDict(
            "long_name"                 => "Kinetic energy",
            "units"                     => "J/m^2 ",
        ))
        ncAPERAD = defVar(ds,"APERAD", Float32, ("time",), deflatelevel=1, attrib = OrderedDict(
            "long_name"                 => "Available Potential Energy production by radiative processes",
            "units"                     => "W/m^2 ",
        ))
        ncAPEDIA = defVar(ds,"APEDIA", Float32, ("time",), deflatelevel=1, attrib = OrderedDict(
            "long_name"                 => "Available Potential Energy production by radiative processes other than radiative and surface fluxes",
            "units"                     => "W/m^2 ",
        ))
        ncAPEWN2 = defVar(ds,"APEWN2", Float32, ("time",), deflatelevel=1, attrib = OrderedDict(
            "long_name"                 => "Available Potential Energy conversion into kinetic energy",
            "units"                     => "W/m^2 ",
        ))
        ncAPEUB2 = defVar(ds,"APEUb2", Float32, ("time",), deflatelevel=1, attrib = OrderedDict(
            "long_name"                 => "Available Potential Energy advection by the U component of the wind",
            "units"                     => "W/m^2 ",
        ))
        ncAPEVB2 = defVar(ds,"APEVb2", Float32, ("time",), deflatelevel=1, attrib = OrderedDict(
            "long_name"                 => "Available Potential Energy advection by the V component of the wind",
            "units"                     => "W/m^2 ",
        ))
        ncAPESURF = defVar(ds,"APESURF", Float32, ("time",), deflatelevel=1, attrib = OrderedDict(
            "long_name"                 => "Available Potential Energy production by surface fluxes",
            "units"                     => "W/m^2 ",
        ))
        ncAPErate = defVar(ds,"APErate", Float32, ("time",), deflatelevel=1, attrib = OrderedDict(
            "long_name"                 => "Available Potential Energy rate of change",
            "units"                     => "W/m^2 ",
        ))
        # Profile (z, time) variables.
        ncrho0 = defVar(ds,"rho0", Float32, ("z","time"), deflatelevel=1, shuffle=true, attrib = OrderedDict(
            "units"                     => "kg/m^3",
            "long_name"                 => "Horizontally averaged density",
        ))
        ncN2 = defVar(ds,"N2", Float32, ("z","time"), deflatelevel=1, shuffle=true, attrib = OrderedDict(
            "units"                     => "1/s^2",
            "long_name"                 => "Brunt-Vaisala frequency",
        ))
        # Full 4-D fields.
        ncQCONVEC = defVar(ds,"QCONVEC", Float32, ("x", "y", "z", "time"), deflatelevel=1, shuffle=true, attrib = OrderedDict(
            "long_name"                 => "Convective heating departure from horizontal mean",
            "units"                     => "m/s^3 ",
        ))
        ncQRAD = defVar(ds,"QRAD", Float32, ("x", "y", "z", "time"), deflatelevel=1, shuffle=true, attrib = OrderedDict(
            "long_name"                 => "Radiative heating rate departure from horizontal mean",
            "units"                     => "m/s^3 ",
        ))
        ncB = defVar(ds,"B", Float32, ("x", "y", "z", "time"), deflatelevel=1, shuffle=true, attrib = OrderedDict(
            "long_name"                 => "Buoyancy rate departure from horizontal mean",
            "units"                     => "m/s^2 ",
        ))
        # Pre-fill the time coordinate, one hourly step per record.
        # NOTE(review): the values written are seconds (itime * 3600.0) while the
        # declared units are "d" (days) — confirm the intended units.
        for itime = 1:var_size[end]
            nctime[itime] = itime * 3600.0
        end
    end
end
"""
Create NetCDF for storing ape budget variables
"""
"""
    set_netcdf_var!(filename,var,data)

Open `filename` in append mode and overwrite the NetCDF variable `var` with the
contents of `data`. Works for data of any rank.
"""
function set_netcdf_var!(filename,var,data)
    @info "Started writing: $var into file: $filename"
    Dataset(filename,"a") do ds
        # A tuple of colons spanning every dimension generalizes the original
        # 1-to-4-dimension if/elseif chain to arrays of any rank.
        colons = ntuple(_ -> Colon(), ndims(data))
        ds[var][colons...] = data
        @info "Finished writing: $var into file: $filename"
    end
end
| AvailablePotentialEnergyFramework | https://github.com/aramirezreyes/AvailablePotentialEnergyFramework.jl.git |
|
[
"MIT"
] | 0.1.0 | 1c56c6aa813337249e142d227a661747b0d287cd | code | 731 |
# Ratio of the molar mass of water vapor (18.016 g/mol) to that of dry air
# (28.966 g/mol), ≈ 0.622.
const epsilon = 18.016/28.966
# NOTE(review): gravity is rounded to 10 m/s² here rather than 9.81 — confirm.
const g = 10u"m/s/s" #acceleration of gravity
# Thermodynamic constants of a substance. Fields that do not apply to a given
# substance (e.g. latent heats for dry air) are left as `nothing`.
struct Substance{T}
    cp :: Union{Nothing,T} # specific heat at constant pressure
    cv :: Union{Nothing,T} # specific heat at constant volume
    R :: Union{Nothing,T}  # specific gas constant
    Lv :: Union{Nothing,T} # latent heat of vaporization
    Lf :: Union{Nothing,T} # latent heat of fusion
end
# Keyword constructor: any subset of the constants may be supplied.
Substance{T}(;cp = nothing, cv = nothing, R = nothing, Lv = nothing, Lf = nothing) where T = Substance{T}(cp,cv,R,Lv,Lf)
# Dry air constants (values at 1013 hPa).
const Dryair = Substance{Quantity}(
    cp = 1006.0u"J/kg/K", #J/kg/k at 1013 hPa
    cv = 718.0u"J/kg/K",
    R = 287.05u"J/kg/K" # J/kg/k
)
# Liquid water: latent heats and specific heat.
const Liquidwater = Substance{Quantity}(
    Lv = 2.5e6u"J/kg", #J/kg
    Lf = 3.33e5u"J/kg",
    cp = 4200.0u"J/kg/K" #j/kg/k
)
# Water vapor: only the gas constant is needed here.
const Watervapor = Substance{Quantity}(
    R = 461.52u"J/kg/K" #j/kg/K
)
| AvailablePotentialEnergyFramework | https://github.com/aramirezreyes/AvailablePotentialEnergyFramework.jl.git |
|
[
"MIT"
] | 0.1.0 | 1c56c6aa813337249e142d227a661747b0d287cd | code | 20164 | """
distance(x1,x2,gridspacing :: Number)
Compute the cartesian distance between two points given their indices and the gridspacing. It asummes uniform grid.
"""
function distance(x1,x2,gridspacing :: Number,weight=1)
    # Euclidean distance between two 2-D grid indices scaled by the (uniform)
    # grid spacing. `weight` is accepted for interface compatibility but unused.
    dx = x2[1] - x1[1]
    dy = x2[2] - x1[2]
    return gridspacing * hypot(dx, dy)
end
"""
compute_N2(xBar_Tv,z)
Take a (1,1,size(z),size(t)) profile of temperature or virtual temperature and return the Brunt - VΓ€isΓ€lΓ€ frequency at each z level and at each t.
"""
function compute_N2(xBar_Tv,z)
    # Result element type: (unitless) gravity divided by a height.
    T = typeof(ustrip(g)/z[1])
    N2 = zeros(T,length(z),size(xBar_Tv,4))
    # Dry-adiabatic lapse-rate term g/cp.
    factor = ustrip(g/Dryair.cp)
    # N² = (g/Tv) * (dTv/dz + g/cp), forward difference in z; the top level
    # copies the level below it.
    @views N2[1:end-1,:] .= ustrip(g) .* ( (xBar_Tv[1,1,2:end,:] .- xBar_Tv[1,1,1:end-1,:]) ./(z[2:end] .- z[1:end-1]) .+ factor) ./ xBar_Tv[1,1,1:end-1,:]
    @views N2[end,:] .= N2[end-1,:]
    # Regularize near-zero stratification (|N²| < 1e-6) to avoid dividing by ~0
    # downstream: interior points take the mean of their vertical neighbours,
    # boundary points copy the single neighbour.
    ind_smallN2 = findall(abs.(N2) .< 1e-6)
    one_z = CartesianIndex(1,0)
    @inbounds for ind in ind_smallN2
        if 1 < ind[1] < length(z)
            N2[ind] = (N2[ind + one_z] + N2[ind - one_z])/2 # If N2 is small, substitute by mean of neighbours
        elseif ind[1] == 1
            N2[ind] = N2[ind + one_z]
        elseif ind[1] == length(z)
            N2[ind] = N2[ind - one_z]
        end
    end
    return N2
end
# Unitful method: same computation as above but keeping Unitful quantities
# (no ustrip); the small-N² cutoff therefore carries units of s⁻².
function compute_N2(xBar_Tv :: Array{ <:Quantity }, z :: Array{ <:Quantity })
    T = typeof(g/z[1])
    N2 = zeros(T,length(z),size(xBar_Tv,4))
    # Dry-adiabatic lapse-rate term g/cp.
    factor = g/Dryair.cp
    # N² = (g/Tv) * (dTv/dz + g/cp), forward difference in z; top level copies
    # the level below it.
    @views N2[1:end-1,:] .= g .* ( (xBar_Tv[1,1,2:end,:] .- xBar_Tv[1,1,1:end-1,:]) ./ (z[2:end] .- z[1:end-1]) .+ factor) ./ xBar_Tv[1,1,1:end-1,:]
    @views N2[end,:] .= N2[end-1,:]
    # Regularize near-zero stratification to avoid dividing by ~0 downstream.
    ind_smallN2 = findall(abs.(N2) .< 1e-6u"s^-2")
    one_z = CartesianIndex(1,0)
    @inbounds for ind in ind_smallN2
        if 1 < ind[1] < length(z)
            N2[ind] = (N2[ind + one_z] + N2[ind - one_z])/2 # If N2 is small, substitute by mean of neighbours
        elseif ind[1] == 1
            N2[ind] = N2[ind + one_z]
        elseif ind[1] == length(z)
            N2[ind] = N2[ind - one_z]
        end
    end
    return N2
end
#WIP
function compute_mse(T,z,qv)
sz = size(T)
return Dryair.cp*T .+ g*reshape(z,(1,1,sz[3],1)) .+ liquidwater.Lv*qv
end
function get_tendency(field :: AbstractArray{T,4}; dt = error("dt is required for the budget computation")) where T
    # Forward difference along the 4th (time) dimension; the last step repeats
    # the previous one so the output has the same shape as the input.
    tendency = similar(field)
    nt = size(field, 4)
    @views for t in 1:nt-1
        tendency[:,:,:,t] .= (field[:,:,:,t+1] .- field[:,:,:,t]) ./ dt
    end
    @views tendency[:,:,:,nt] .= tendency[:,:,:,nt-1]
    return tendency
end
function get_tendency(field :: AbstractArray{T,1}; dt = error("dt is required for the budget computation")) where T
    # Forward difference; the last entry duplicates the previous one so the
    # output length matches the input.
    n = length(field)
    tendency = similar(field)
    for i in 1:n-1
        tendency[i] = (field[i+1] - field[i]) / dt
    end
    tendency[n] = tendency[n-1]
    return tendency
end
function get_advection_asresidual(tendency,sources...)
    # Budget residual: the sum of all source terms minus the tendency.
    total_sources = reduce(+, sources)
    return total_sources .- tendency
end
# integrate_vertically(field :: AbstractArray{T,4};dz = 1, weight = 1) where {T} = reduce(+,dz*weight.*field,dims=3)
# integrate_vertically(field :: AbstractArray{T,3};dz = 1, weight = 1) where {T} = reduce(+,dz*weight.*field,dims=2)
# integrate_vertically(field :: AbstractArray{T,2};dz = 1, weight = 1) where {T} = reduce(+,dz*weight.*field,dims=2)
# integrate_vertically(field :: AbstractArray{T,1};dz = 1 , weight = 1) where {T} = reduce(+,dz*weight*field)
# function integrate_vertically(field :: AbstractArray{T,4}; coord :: AbstractArray{T,1},weight = 1) where T
# sz = size(field)
# sc = size(coord)
# integral = zeros(eltype(field),(sz[1],sz[2],1,sz[4]))
# @inbounds for ind in CartesianIndices(field)
# if ind[3] != sz[3]
# integral[ind[1],ind[2],1,ind[4]] = weight*(coord[ind[3]+1] - coord[ind[3]]) * field[ind]
# end
# end
# return integral
# end
# function integrate_vertically(field :: AbstractArray{T,4}; coord :: AbstractArray{T,1},weight :: AbstractArray{T,1}) where T
# sz = size(field)
# sc = size(coord)
# integral = zeros(eltype(field),(sz[1],sz[2],1,sz[4]))
# @inbounds for ind in CartesianIndices(field)
# if ind[3] != sz[3]
# integral[ind[1],ind[2],1,ind[4]] += weight[ind[3]]*(coord[ind[3]+1] - coord[ind[3]]) * field[ind]
# end
# end
# return integral
# end
"""
    integrate_horizontally(field :: AbstractArray{T,4}; darea)

Sum the field and scale by the cell area `darea`.
NOTE(review): the reduction runs over *all* dimensions, not only the
horizontal ones — confirm this is the intended domain total.
"""
function integrate_horizontally(field :: AbstractArray{T,4}; darea) where T
    total = reduce(+, field)
    return darea * total
end
"""
    integrate_vertically(field; coord, weight)

Vertically integrate a weighted 4-d field (x,y,z,t) with a rectangle rule:
each level k contributes `weight[i,j,k,l]*(coord[k+1]-coord[k])*field[i,j,k,l]`.
The topmost level is skipped (no spacing above it). Returns an array of size
(nx, ny, 1, nt).
"""
function integrate_vertically(field :: AbstractArray{T,4}; coord :: AbstractArray{T,1},weight :: AbstractArray{T,4}) where T
    nx, ny, nz, nt = size(field)
    integral = zeros(T, (nx, ny, 1, nt))
    for l in 1:nt, k in 1:nz-1, j in 1:ny, i in 1:nx
        layer_thickness = coord[k+1] - coord[k]
        integral[i,j,1,l] += weight[i,j,k,l] * layer_thickness * field[i,j,k,l]
    end
    return integral
end
"""
compute_virtual_temp(temperature, specific_humidity)
Compute the virtual temperature considering only water vapor mixing ratio.
"""
function compute_virtual_temp(temperature,specific_humidity)
return temperature*(1 + epsilon*specific_humidity)
end
"""
    spatial_derivative!(output, field, dx, dim)

In-place finite-difference derivative of `field` along dimension `dim` with
grid spacing `dx`: one-sided differences at the boundaries, centered
differences in the interior. Returns `output`.

Fixes: the previous version referenced an undefined variable `u` when building
the index offsets (UndefVarError at runtime) and only supported `dim` 1 or 2;
it now works for any dimension and any array rank.
"""
function spatial_derivative!(output,field,dx,dim)
    # unit CartesianIndex step along `dim`
    step = CartesianIndex(ntuple(d -> d == dim ? 1 : 0, ndims(field)))
    n = size(field, dim)
    @inbounds for ind in CartesianIndices(field)
        if ind[dim] == 1
            output[ind] = (field[ind+step] - field[ind])/dx
        elseif ind[dim] == n
            output[ind] = (field[ind] - field[ind-step])/dx
        else
            output[ind] = (field[ind+step] - field[ind-step])/2dx
        end
    end
    return output
end
"""
    spatial_derivative(field, dx, dim)

Allocating convenience wrapper around `spatial_derivative!`.
"""
function spatial_derivative(field, dx, dim)
    derivative = similar(field)
    spatial_derivative!(derivative,field,dx,dim)
    return derivative
end
"""
    get_vorticity!(output, u, v, dx, dy)

In-place vertical vorticity `dv/dx - du/dy` with one-sided differences at the
domain edges and centered differences in the interior. Assumes the first two
dimensions are x and y (originally written for 4-d (x,y,z,t) arrays).
"""
function get_vorticity!(output,u,v,dx,dy)
    xshift = CartesianIndex((1, ntuple(_->0, ndims(u) - 1)...))
    yshift = CartesianIndex((0,1, ntuple(_->0, ndims(u) - 2)...))
    nx = size(v,1)
    ny = size(u,2)
    for ind in CartesianIndices(output)
        dvdx = if ind[1] == 1
            (v[ind+xshift] - v[ind])/dx
        elseif ind[1] == nx
            (v[ind] - v[ind-xshift])/dx
        else
            (v[ind+xshift] - v[ind-xshift])/2dx
        end
        dudy = if ind[2] == 1
            (u[ind+yshift] - u[ind])/dy
        elseif ind[2] == ny
            (u[ind] - u[ind-yshift])/dy
        else
            (u[ind+yshift] - u[ind-yshift])/2dy
        end
        output[ind] = dvdx - dudy
    end
    return output
end
"""
    get_divergence!(output, u, v, dx, dy)

In-place horizontal divergence `du/dx + dv/dy` (one-sided differences at the
edges, centered in the interior). Assumes the first two dimensions are x and
y, as in `get_vorticity!`/`get_okubo_weiss!`.

Fixes: the previous version differentiated `v` along x while dividing by `dy`
and `u` along y while dividing by `dx` — the fields and spacings were swapped
relative to the convention used by `get_vorticity!` and `get_okubo_weiss!`
(u differenced along dim 1 with `dx`, v along dim 2 with `dy`).
"""
function get_divergence!(output,u,v,dx,dy)
    ## This implementation assumes 4d input
    onex = CartesianIndex((1, ntuple(_->0, ndims(u) - 1)...))
    oney = CartesianIndex((0,1, ntuple(_->0, ndims(u) - 2)...))
    # du/dx
    for ind in CartesianIndices(output)
        if ind[1] == 1
            output[ind] = (u[ind+onex] - u[ind])/dx
        elseif ind[1] == size(u,1)
            output[ind] = (u[ind] - u[ind-onex])/dx
        else
            output[ind] = (u[ind+onex] - u[ind-onex])/2dx
        end
    end
    # + dv/dy
    for ind in CartesianIndices(output)
        if ind[2] == 1
            output[ind] += (v[ind+oney] - v[ind])/dy
        elseif ind[2] == size(v,2)
            output[ind] += (v[ind] - v[ind-oney])/dy
        else
            output[ind] += (v[ind+oney] - v[ind-oney])/2dy
        end
    end
    return output
end
"""
    get_divergence(u, v, dx, dy)

Allocating convenience wrapper around `get_divergence!`.
"""
function get_divergence(u,v,dx,dy)
    divergence = similar(u)
    get_divergence!(divergence,u,v,dx,dy)
    return divergence
end
"""
    get_vorticity(u, v, dx, dy)

Allocating convenience wrapper around `get_vorticity!`.
"""
function get_vorticity(u,v,dx,dy)
    vorticity = similar(u)
    get_vorticity!(vorticity,u,v,dx,dy)
    return vorticity
end
"""
    get_okubo_weiss!(output, u, v, dx, dy)

In-place Okubo-Weiss parameter, written here in expanded form
`dudx² + dvdy² - 2·dudx·dvdy + 4·dvdx·dudy` (algebraically equal to
sn² + ss² - ζ²). One-sided differences at the boundaries, centered in the
interior; the first two dimensions are x and y.
"""
function get_okubo_weiss!(output,u,v,dx,dy)
    xshift = CartesianIndex((1, ntuple(_->0, ndims(u) - 1)...))
    yshift = CartesianIndex((0,1, ntuple(_->0, ndims(u) - 2)...))
    nx, ny = size(u,1), size(u,2)
    for ind in CartesianIndices(u)
        # x derivatives of both components
        if ind[1] == 1
            dudx = (u[ind+xshift] - u[ind])/dx
            dvdx = (v[ind+xshift] - v[ind])/dx
        elseif ind[1] == nx
            dudx = (u[ind] - u[ind-xshift])/dx
            dvdx = (v[ind] - v[ind-xshift])/dx
        else
            dudx = (u[ind+xshift] - u[ind-xshift])/2dx
            dvdx = (v[ind+xshift] - v[ind-xshift])/2dx
        end
        # y derivatives of both components
        if ind[2] == 1
            dvdy = (v[ind+yshift] - v[ind])/dy
            dudy = (u[ind+yshift] - u[ind])/dy
        elseif ind[2] == ny
            dvdy = (v[ind] - v[ind-yshift])/dy
            dudy = (u[ind] - u[ind-yshift])/dy
        else
            dvdy = (v[ind+yshift] - v[ind-yshift])/2dy
            dudy = (u[ind+yshift] - u[ind-yshift])/2dy
        end
        output[ind] = dudx*dudx + dvdy*dvdy - 2dudx*dvdy + 4dvdx*dudy
    end
    return output
end
"""
    get_okubo_weiss(u, v, dx, dy)

Allocating convenience wrapper around `get_okubo_weiss!`.
"""
function get_okubo_weiss(u,v,dx,dy)
    return get_okubo_weiss!(similar(u),u,v,dx,dy)
end
"""
get_saturation_vapor_pressure(T)
Receive temperature T in Kelvin and compute the saturation vapor pressure in hPa from the August-Roche-Magnus formula that approximates the solution to the Clausius-Clapeyron relationship (Wikipedia contributors. (2020, December 19). ClausiusβClapeyron relation. In Wikipedia, The Free Encyclopedia. Retrieved 06:57, December 20, 2020, from https://en.wikipedia.org/w/index.php?title=Clausius%E2%80%93Clapeyron_relation&oldid=995159175)
"""
function get_saturation_vapor_pressure(T)
return 6.112*exp(17.67 * (T-273.15) / (243.5 + (T - 273.15)))
end
# Unitful method: same August-Roche-Magnus formula, in hPa for a temperature
# carrying Kelvin units.
function get_saturation_vapor_pressure(T :: Quantity)
    t_above_freezing = T - 273.15u"K"
    return 6.112u"hPa"*exp(17.67 * t_above_freezing / (243.5u"K" + t_above_freezing))
end
"""
get_partial_vapor_pressure(mixing_ratio,pressure)
Receive a water vapor mixing ratio (unitless g/g) and environmental pressure and compute the partial pressure of water vapor in the same units as the input pressure.
"""
function get_partial_vapor_pressure(mixing_ratio,pressure)
return mixing_ratio*pressure/(epsilon + mixing_ratio)
end
"""
get_mixing_ratio(water_vapor_partial_pressure,env_pressure)
Receive a water vapor mixing ratio (unitless g/g) and environmental pressure and compute the partial pressure of water vapor in the same units as the incoming pressure.
"""
function get_mixing_ratio(water_vapor_partial_pressure,env_pressure)
return epsilon*water_vapor_partial_pressure/(env_pressure - water_vapor_partial_pressure)
end
"""
get_specific_entropy(temperature,mixing_ratio,pressure)
Receive temperature in Kelvin, water vapor mixing ratio (unitless g/g) and pressure (hPa) and compute the specific entropy of a parcel using equation in Emmanuel's (E94, EQN. 4.5.9)
"""
function get_specific_entropy(temperature,mixing_ratio,pressure)
vapor_pressure = get_partial_vapor_pressure(mixing_ratio,pressure)
saturation_vapor_pressure = get_saturation_vapor_pressure(temperature)
RH = min(vapor_pressure/saturation_vapor_pressure,1.0)
specific_entropy = (Dryair.cp + mixing_ratio * Liquidwater.cp) *
log(temperature/unit(temperature)) - Dryair.R * log((pressure - vapor_pressure)/unit(pressure)) +
Liquidwater.Lv * mixing_ratio / temperature - mixing_ratio * Watervapor.R * log(RH)
end
"""
get_lifted_condensation_level(temperature,relative_humidity,pressure)
Receive temperature in Kelvin, relative humidity (unitless) and pressure (hPa) and compute the lifted condensation level based on Emanuel's E94 "calcsound.f" code at http://texmex.mit.edu/pub/emanuel/BOOK/
"""
function get_lifted_condensation_level(temperature,relative_humidity,pressure)
return pressure * (relative_humidity^(temperature/(1669.0-122.0*relative_humidity-temperature)))
end
# Unitful method: same Emanuel E94 fit with the empirical constants in Kelvin.
function get_lifted_condensation_level(temperature :: Quantity ,relative_humidity :: Quantity ,pressure :: Quantity)
    exponent = temperature/(1669.0u"K"-122.0u"K"*relative_humidity-temperature)
    return pressure * relative_humidity^exponent
end
#we need temperature to celsius
#saturation vapor pressure
"""
specific_humidity_to_mixing_ratio(specific_humidity)
Take a specific humidity (unitless g/g) and return a mixing ratio
"""
function specific_humidity_to_mixing_ratio(specific_humidity)
return mixing_ratio = specific_humidity / (1 - specific_humidity)
end
"""
mixing_ratio_to_specific_humidity(mixing_ratio)
Take a mixing ratio (unitless g/g) and return a specific humidity
"""
function mixing_ratio_to_specific_humidity(mixing_ratio)
return q = mixing_ratio / (1 + mixing_ratio)
end
"""
get_virtual_temperature(temperature,mixing_ratio_total_water,mixing_ratio_water_vapor)
Receive temperature (K) and mixing ratios of total water and water vapor (unitless g/g) and compute the virtual temperature
"""
function get_virtual_temperature(temperature,mixing_ratio_total_water,mixing_ratio_water_vapor)
return temperature*(1 + mixing_ratio_water_vapor/epsilon)/(1 + mixing_ratio_total_water)
end
"""
get_buoyancy_of_lifted_parcel(tparcel,rparcel,pparcel,t,r,p,ptop=50)
"""
function get_buoyancy_of_lifted_parcel(tparcel,rparcel,pparcel,t,r,p,ptop=50u"hPa")
n_valid_levels = findfirst(<(ptop),p)
p = p[begin:n_valid_levels]
t = t[begin:n_valid_levels]
r = r[begin:n_valid_levels]
tvirtual_diff_parcel_env = similar(t)
parcel_sat_vapor_pressure = get_saturation_vapor_pressure(tparcel)
parcel_vapor_pressure = get_partial_vapor_pressure(rparcel,pparcel)
parcel_rh = min(parcel_vapor_pressure/parcel_sat_vapor_pressure , 1.0)
parcel_specific_entropy = get_specific_entropy(tparcel,rparcel,pparcel)
parcel_lcl = get_lifted_condensation_level(tparcel,parcel_rh,pparcel)
@show parcel_lcl
below_lcl = findall(>=(parcel_lcl),p)
above_lcl = findall(<(parcel_lcl),p)
#These two must populate buoyancy of lifted parcel_get_vapor_pressure
#this would be adiabatic lifting, easy enough
for level in below_lcl
tlifted = tparcel*(p[level]/pparcel)^(Dryair.R/Dryair.cp)
rlifted = rparcel
tvirtual_lifted = get_virtual_temperature(tlifted,rlifted,rlifted)
tvirtual_env = get_virtual_temperature(t[level],r[level],r[level])
tvirtual_diff_parcel_env[level] = tvirtual_lifted - tvirtual_env
end
#We start with environmental values of temperature, mixing ratio, entropy etc
for level in above_lcl
niter = 0
t_previousiter = t[level]
saturation_vapor_pressure_previousiter = get_saturation_vapor_pressure(t_previousiter)
mixing_ratio_previousiter = get_mixing_ratio(saturation_vapor_pressure_previousiter,p[level])
t_currentiter = 0.0u"K"
mixing_ratio_currentiter = 0.0u"g/g"
while (abs(t_previousiter - t_currentiter) > 0.001u"K" )
niter += 1
t_currentiter = t_previousiter
saturation_vapor_pressure_currentiter = get_saturation_vapor_pressure(t_currentiter)
mixing_ratio_currentiter = get_mixing_ratio(saturation_vapor_pressure_currentiter,p[level])
dsdt = (Dryair.cp + rparcel*Liquidwater.cp + Liquidwater.Lv*Liquidwater.Lv*mixing_ratio_currentiter/
(Watervapor.R*t_currentiter*t_currentiter))/t_currentiter
vapor_pressure_currentiter = get_partial_vapor_pressure(mixing_ratio_currentiter,p[level])
entropy_currentinter = (Dryair.cp+rparcel*Liquidwater.cp)*log(t_currentiter/unit(t_currentiter)) -
Dryair.R*log((p[level]-vapor_pressure_currentiter)/unit(p[level])) + Liquidwater.Lv*mixing_ratio_currentiter / t_currentiter
if niter < 3
temperature_step = 0.3
else
temperature_step = 1
end
t_previousiter = t_currentiter + temperature_step*(parcel_specific_entropy - entropy_currentinter)/dsdt
if (niter > 500 ) | (vapor_pressure_currentiter > ( p[level] - 1.0u"hPa") )
error("Temperature didn't converge during lift")
end
end
tvirtual_lifted = get_virtual_temperature(t_currentiter,rparcel,mixing_ratio_currentiter)
tvirtual_env = get_virtual_temperature(t[level],r[level],r[level])
tvirtual_diff_parcel_env[level] = tvirtual_lifted - tvirtual_env
end
return tvirtual_diff_parcel_env
end
"""
get_potential_temperature(temperature, pressure, reference_pressure)
Compute potential temperature from temperature and pressure.
"""
function get_potential_temperature(temperature, pressure, reference_pressure)
exponent = ustrip(Dryair.R / Dryair.cp)
return temperature * (reference_pressure/pressure)^exponent
end
# Unitful method: the exponent R/cp is already dimensionless for unitful inputs.
function get_potential_temperature(temperature :: Quantity, pressure :: Quantity, reference_pressure :: Quantity)
    kappa = Dryair.R / Dryair.cp
    return temperature * (reference_pressure/pressure)^kappa
end
"""
get_virtual_temperature(temperature, specific_humidity)
Compute virtual temperature from temperature and specific humidity.
"""
function get_virtual_temperature(temperature, specific_humidity)
return (one(temperature) + one(temperature)/1000*epsilon*specific_humidity)*temperature
end
# Unitful method, same formula as the plain-number version.
function get_virtual_temperature(temperature :: Quantity, specific_humidity :: Quantity)
    moisture_correction = one(temperature) + one(temperature)/1000*epsilon*specific_humidity
    return moisture_correction*temperature
end
"""
function surface_sensible_heat_flux_to_buoyancy(SST , sensible_heat_flux ; rho = 1)
Convert surface energy fluxes in units of W/m^2 to units of buoyancy m^2/s^3).
"""
function surface_sensible_heat_flux_to_buoyancy(SST, sensible_heat_flux; rho = 1.0)
return ustrip(g) /(ustrip(Dryair.cp)*SST) * sensible_heat_flux
end
# Unitful method. Fixes: the density was hard-coded to 1u"kg/m^3" instead of
# using the `rho` keyword; the default value keeps the old behavior.
function surface_sensible_heat_flux_to_buoyancy(SST :: Quantity, sensible_heat_flux :: Quantity; rho = 1u"kg/m^3")
    return g /(rho*Dryair.cp*SST) * sensible_heat_flux
end
"""
function surface_latent_heat_flux_to_buoyancy(SST , sensible_heat_flux ; rho = 1)
Convert surface energy fluxes in units of W/m^2 to units of buoyancy m^2/s^3).
"""
function surface_latent_heat_flux_to_buoyancy(SST, latent_heat_flux; rho = 1.0)
return ustrip(g)/(one(SST)*ustrip(Dryair.cp)*SST)*(epsilon*ustrip(Dryair.cp)*SST/ustrip(Liquidwater.Lv)*latent_heat_flux)
end
# Unitful method. Fixes: the density was hard-coded to 1u"kg/m^3" instead of
# using the `rho` keyword; the default value keeps the old behavior.
function surface_latent_heat_flux_to_buoyancy(SST :: Quantity, latent_heat_flux :: Quantity; rho = 1u"kg/m^3")
    return g/(rho*Dryair.cp*SST)*(epsilon*Dryair.cp*SST/Liquidwater.Lv*latent_heat_flux)
end
"""
get_buoyancy(temperature_anomaly,mean_temperature)
Compute buoyancy from a temperature anomaly and a temperature profile as in RamirezReyes and Yang 2021
"""
function get_buoyancy(temperature_anomaly,mean_temperature)
return ustrip(g) * ustrip(temperature_anomaly)/ustrip(mean_temperature)
end
# Unitful method of the buoyancy computation.
function get_buoyancy(temperature_anomaly :: Quantity ,mean_temperature :: Quantity)
    buoyancy = g * temperature_anomaly/mean_temperature
    return buoyancy
end
"""
radiative_heating_rate_to_buoyancy(mean_temperature,radiative_heating_rate)
Convert radiative heating rate in K/s to buoyancy production rate in m/s^2 using the a mean sounding as reference profile
"""
function radiative_heating_rate_to_buoyancy(mean_temperature,radiative_heating_rate)
return ustrip(g) * radiative_heating_rate / mean_temperature
end
# Unitful method of the heating-rate-to-buoyancy conversion.
function radiative_heating_rate_to_buoyancy(mean_temperature :: Quantity,radiative_heating_rate :: Quantity)
    buoyancy_production = g * radiative_heating_rate / mean_temperature
    return buoyancy_production
end
"""
get_density_ideal_gas(pressure, temperature)
Compute density of dry air from pressure and temperature using the Ideal gas law.
"""
function get_density_ideal_gas(pressure, temperature)
return pressure / temperature / eltype(temperature)(ustrip(Dryair.R))
end
# Unitful method of the ideal-gas density.
function get_density_ideal_gas(pressure :: Quantity, temperature :: Quantity)
    density = pressure / temperature / Dryair.R
    return density
end
| AvailablePotentialEnergyFramework | https://github.com/aramirezreyes/AvailablePotentialEnergyFramework.jl.git |
|
[
"MIT"
] | 0.1.0 | 1c56c6aa813337249e142d227a661747b0d287cd | code | 3358 | """
"""
# Mean precipitation conditioned on precipitable-water bins of width
# `binspacing` from 0 to `max_precip_bin`. Bins with no columns get 0.0.
# NOTE(review): the strict inequalities drop values landing exactly on a bin
# edge — confirm that is intended.
function average_precipitation_per_pw_bin_dayang(pw,precipitation,max_precip_bin,binspacing)
    bin_edges = 0:binspacing:max_precip_bin
    mean_precip_per_bin = Float64[]
    for bin in 1:length(bin_edges)
        lower = bin_edges[1] + binspacing*(bin-1)
        upper = bin_edges[1] + binspacing*bin
        columns_in_bin = findall(x -> (x>lower)&(x<upper), pw)
        if isempty(columns_in_bin)
            push!(mean_precip_per_bin, 0.0)
        else
            push!(mean_precip_per_bin, mean(precipitation[columns_in_bin]))
        end
    end
    return mean_precip_per_bin
end
"""
average_precipitation_per_pw_bin(pw,precipitation,bins,binspacing)
Computes the mean precipitation as a function of the binned precipitable water following Yang, D., 2018: Boundary Layer Height and Buoyancy Determine the Horizontal Scale of Convective Self-Aggregation. J. Atmos. Sci., 75, 469β478, https://doi.org/10.1175/JAS-D-17-0150.1.
It will be happy if bins is quite large (example from 0 to 300 mm).
"""
function average_precipitation_per_pw_bin(pw,precipitation,pw_bins,binspacing)
T = eltype(pw)
tone = one(T)
tzero = zero(T)
probability_of_being_in_bin = zeros(T,length(pw_bins))
average_precipitation_per_bin = zeros(T,length(pw_bins))
number_of_columns = reduce(*,size(pw))
factor = number_of_columns * binspacing
@inbounds @simd for column in CartesianIndices(pw)
binindex = ceil(Int,pw[column]/binspacing)
average_precipitation_per_bin[binindex] += precipitation[column]
probability_of_being_in_bin[binindex] += tone
end
@tturbo for ind in eachindex(probability_of_being_in_bin)
divideby = ifelse(probability_of_being_in_bin[ind] == tzero,tone,probability_of_being_in_bin[ind])
average_precipitation_per_bin[ind] /= divideby
end
@tturbo probability_of_being_in_bin ./= factor
return probability_of_being_in_bin,average_precipitation_per_bin
end
"""
velocity_topolar(u,v,index,center)
Take a velocity vector, an origin of said vector and a center and return the tangential and azimuthal velocities with respect to that center.
"""
function velocity_topolar(u,v,index,center)
pos = index .- center
theta1 = atan(v,u)
theta2 = atan(pos[2],pos[1])
return -hypot(u,v) * cos(theta1 + theta2), hypot(u,v)*sin(theta1 + theta2)
end
"""
velocity_cartesian_to_polar(u,v,index_of_point = (0,0),index_of_center = (0,0))
Take matrix of U and V components of velocities as well as a matrix of locations and the location of a center.
Compute the tangential and radial components of velocity around this center.
"""
function velocity_cartesian_to_polar(u,v,index_of_point = (0,0),index_of_center = (0,0))
tangential = similar(u)
radial = similar(u)
for index in CartesianIndices(u)
t,r = velocity_cartesian_to_polar(u[index],v[index],Tuple(index)[1:2],index_of_center[1:2])
radial[index] = r
tangential[index] = t
end
return tangential,radial
end
# Scalar method: decompose one (u,v) vector into (tangential, radial)
# components relative to `index_of_center` via the angle between the velocity
# and the position vector.
function velocity_cartesian_to_polar(u :: Number,v :: Number,index_of_point = (0,0),index_of_center = (0,0))
    magnitude = hypot(u,v)
    xoffset, yoffset = index_of_point .- index_of_center
    position_angle = atan(yoffset,xoffset)
    velocity_angle = atan(v,u)
    return magnitude .* sincos(velocity_angle - position_angle)
end
| AvailablePotentialEnergyFramework | https://github.com/aramirezreyes/AvailablePotentialEnergyFramework.jl.git |
|
[
"MIT"
] | 0.1.0 | 1c56c6aa813337249e142d227a661747b0d287cd | code | 13908 |
#using AvailablePotentialEnergyFramework
#using NCDatasets
#using Statistics
#using JLD
#using Unitful: ustrip
"""
getapeanalysis(file2d,file3d,output_name,outputInterval,FloatType::Type=Float64)
"""
function testAPEBudget()
file2d = "/Users/arreyes/Documents/Research/Developement/testNetCDF/timeSlab_2d.nc"
file3d = "/Users/arreyes/Documents/Research/Developement/testNetCDF/timeSlab_3d.nc"
Float_type = Float32
day = 86400
sst = 300
dt = 14400
Pref = 1000*1e2 #Pa
Ts = sst #Sea surface temperature
qs = 25.7*1e-3
Tvs = Ts*(1+epsilon*qs)
c1 = ustrip(Dryair.R/Dryair.cp)
ds3d = Dataset(file3d)
ds2d = Dataset(file2d)
x = variable(ds3d,"x")[:] :: Array{Float_type,1}
y = variable(ds3d,"y")[:] :: Array{Float_type,1}
z = variable(ds3d,"z")[:] :: Array{Float_type,1}
t = variable(ds3d,"time")[:] :: Array{Float_type,1}
P0 = variable(ds3d,"p")[:] :: Array{Float_type,1}
U = variable(ds3d,"U")[:,:,:,:] :: Array{Float_type,4}
V = variable(ds3d,"V")[:,:,:,:] :: Array{Float_type,4}
W = variable(ds3d,"W")[:,:,:,:] :: Array{Float_type,4}
RAD = variable(ds3d,"QRAD")[:,:,:,:] :: Array{Float_type,4}
T = variable(ds3d,"TABS")[:,:,:,:] :: Array{Float_type,4}
Tv = variable(ds3d,"QV")[:,:,:,:] :: Array{Float_type,4}
PP = variable(ds3d,"PP")[:,:,:,:] :: Array{Float_type,4}
SHF = variable(ds2d,"SHF")[:,:,:] :: Array{Float_type,3}
LHF = variable(ds2d,"LHF")[:,:,:] :: Array{Float_type,3}
close(ds3d)
close(ds2d)
########## FilFloat_Typeering and chopping ##########
@. SHF = ustrip(g)/(1*ustrip(Dryair.cp)*Ts)*(SHF)
@. LHF = ustrip(g)/(1*ustrip(Dryair.cp)*Ts)*(epsilon*ustrip(Dryair.cp)*Ts/ustrip(Liquidwater.Lv*LHF))
@. SHF += LHF # Now it is transformed
ThetaV = similar(T)
xBar_Pt = Array{eltype(T),4}(undef,1,1,size(PP,3),size(PP,4))
xBar_Tv = Array{eltype(T),4}(undef,1,1,size(Tv,3),size(Tv,4))
xBar_ThetaV = Array{eltype(T),4}(undef,1,1,size(ThetaV,3),size(ThetaV,4))
#******
dx = x[2]-x[1]
dy = y[2]-y[1] # Grid size
kz = length(z) # vertical levels
kx = length(x) # # of horizonal grid points
ky = length(y) # # of horizonal grid points
@. RAD = RAD/day # K/s #Heating rate per second;
@. Tv = get_virtual_temperature.(T,Tv)#(1 + 1e-3*epsilon*Tv)*T # Virtual temperature
@. P0 = P0*1e2
PP .= PP .+ reshape(P0,(1,1,kz,1))
#ThetaV .= Tv.*(PP./reshape(P0,(1,1,kz,1))).^c1 # Virtual potential temp
ThetaV .= get_potential_temperature.(Tv,PP,P0[1]) #Tv.*(PP/P0[1]).^c1 # Virtual potential temp
mean!(xBar_Pt,PP)
mean!(xBar_Tv,Tv)
mean!(xBar_ThetaV,ThetaV)
var_Tv = Tv .- xBar_Tv
var_ThetaV = ThetaV .- xBar_ThetaV
rho0 = xBar_Pt./ustrip(Dryair.R)./xBar_Tv
B = ustrip(g) .* var_ThetaV./xBar_ThetaV
@. RAD = RAD*(ustrip(g)/xBar_Tv) # convert unit to buoyancy/s
N2 = compute_N2(xBar_Tv,z)
# PP = []
# T = []
# ThetaV = []
# Tv = []
# var_ThetaV = []
# var_Tv = []
### NOTE that SHF is now the sum, saving memory
# Buoyancy budget
dz = 50
#@info size(B), size(RAD), size(SHF), size(U),size(V) ,size(W), size(N2), size(dx),size(dy), size(dz), size(dt), size(x),size(y), size(z), size(t)
# dBdt, UdBdx,VdBdy, WN2, Qs, Diabatic_other = buoyancybudget(B, RAD, SHF, U,V ,W, N2, dx,dy, dz, dt, x,y, z, t);
Diabatic_other = get_diabatic_as_residual_buoyancy(B, RAD, SHF, U,V ,W, N2, dx,dy, dz, dt);
@info "hola"
# APE budget
z_up = 15000
z_BL = 2000
(int_mass,
int_KE,
int_APE,
int_APE_rate,
int_APE_Ub2,
int_APE_Vb2,
int_APE_WN2,
int_APE_RAD,
int_APE_DIA,
xBar_APE_Fs,
residual) = getapebudget(B, U,V, W, N2, RAD, SHF, Diabatic_other, rho0, x,y, z, t, dx,dy, dz, dt, z_up)
# Diabatic_other .= Diabatic_other .- mean(Diabatic_other,dims=(1,2)) #They are now perturbations
# RAD .= RAD .- mean(RAD,dims=(1,2))
# B .= B .- mean(B,dims=(1,2))
# dia_ape = Diabatic_other.*B
# rad_ape = RAD.*B
# jldopen(string("outputfile.jld"), "w") do file
# write(file,"int_APE",int_APE)
# write(file,"int_KE",int_KE)
# write(file,"int_RAD",int_APE_RAD)
# write(file,"int_DIA",int_APE_DIA)
# write(file,"int_WN2",int_APE_WN2)
# write(file,"int_Ub2",int_APE_Ub2)
# write(file,"int_Vb2",int_APE_Vb2)
# write(file,"int_APE_rate",int_APE_rate)
# write(file,"APE_Fs",xBar_APE_Fs)
# write(file,"convec_heating_anomaly",Diabatic_other)
# write(file,"rad_heating_anomaly",RAD)
# write(file,"buoyancy_anomaly",B)
# write(file,"radiative_ape_production",dia_ape)
# write(file,"convective_ape_production",rad_ape)
# end
end
# """
# getapeanalysis(file2d,file3d,output_name,outputInterval,FloatType::Type=Float64)
# """
# function testAPEBudget_distributed()
# file2d = "/Users/arreyes/Documents/Research/Developement/testNetCDF/timeSlab_2d.nc"
# file3d = "/Users/arreyes/Documents/Research/Developement/testNetCDF/timeSlab_3d.nc"
# Float_type = Float32
# day = 86400
# sst = 300
# dt = 14400
# Pref = 1000*1e2 #Pa
# Ts = sst #Sea surface temperature
# qs = 25.7*1e-3
# Tvs = Ts*(1+epsilon*qs)
# c1 = (R/Dryair.cp)
# # ds3d = Dataset(file3d)
# # ds2d = Dataset(file2d)
# # x = variable(ds3d,"x")[:] :: Array{Float_type,1}
# # y = variable(ds3d,"y")[:] :: Array{Float_type,1}
# # z = variable(ds3d,"z")[:] :: Array{Float_type,1}
# # t = variable(ds3d,"time")[:] :: Array{Float_type,1}
# # P0 = variable(ds3d,"p")[:] :: Array{Float_type,1}
# # U = variable(ds3d,"U")[:,:,:,:] :: Array{Float_type,4}
# # V = variable(ds3d,"V")[:,:,:,:] :: Array{Float_type,4}
# # W = variable(ds3d,"W")[:,:,:,:] :: Array{Float_type,4}
# # RAD = variable(ds3d,"QRAD")[:,:,:,:] :: Array{Float_type,4}
# # T = variable(ds3d,"TABS")[:,:,:,:] :: Array{Float_type,4}
# # Tv = variable(ds3d,"QV")[:,:,:,:] :: Array{Float_type,4}
# # PP = variable(ds3d,"PP")[:,:,:,:] :: Array{Float_type,4}
# # SHF = variable(ds2d,"SHF")[:,:,:] :: Array{Float_type,3}
# # LHF = variable(ds2d,"LHF")[:,:,:] :: Array{Float_type,3}
# # close(ds3d)
# # close(ds2d)
# smooth_x = smooth_y = 15 #it was 11
# smooth_time = floor(Int,12*5)+1
# position = 2
# sx = sy = 100
# sz = 80
# st = 120
# dx = dy = 1000
# dz = 50
# dt = 7200
# x = y = collect(range(0,step = dx,length=sx));
# z = collect(range(0, step = dz,length = sz));
# t = collect(range(0, step = dt, length = st));
# P0 = 1000*exp.(z./8000);
# U = V = 5.0rand(sx,sy,sz,st);
# W = 0.2rand(sx,sy,sz,st);
# RAD = rand(sx,sy,sz,st);;
# T = 300 .+ rand(sx,sy,sz,st);
# Tv = 0.2rand(sx,sy,sz,st);
# PP = rand(sx,sy,sz,st);
# SHF = rand(sx,sy,st);
# LHF = rand(sx,sy,st);
# B = similar(RAD);
# ########## smoothing and chopping ##########
# @info "Smoothing batch 1"
# fU = @spawnat 2 cutborders!(filter_array(U,smooth_x,smooth_time,1),smooth_time,position)
# U = fetch(fU)
# fV = @spawnat 3 cutborders!(filter_array(V,smooth_x,smooth_time,1),smooth_time,position)
# V = fetch(fV)
# fW = @spawnat 2 cutborders!(filter_array(W,smooth_x,smooth_time,1),smooth_time,position)
# W = fetch(fW)
# fTv = @spawnat 3 cutborders!(filter_array(Tv,smooth_x,smooth_time,1),smooth_time,position)
# Tv = fetch(fTv)
# fetch(@spawnat 2 GC.gc())
# fetch(@spawnat 3 GC.gc())
# @info "smoothing batch 2"
# fT = @spawnat 2 cutborders!(filter_array(T,smooth_x,smooth_time,1),smooth_time,position)
# fPP = @spawnat 3 cutborders!(filter_array(PP,smooth_x,smooth_time,1),smooth_time,position)
# fRAD = @spawnat 3 cutborders!(filter_array(RAD,smooth_x,smooth_time,1),smooth_time,position)
# SHF = cutborders!(filter_array(SHF,smooth_x,smooth_time,1),smooth_time,position)
# LHF = cutborders!(filter_array(LHF,smooth_x,smooth_time,1),smooth_time,position)
# t = cutborders!(t,smooth_time,position)
# T = fetch(fT)
# PP = fetch(fPP)
# RAD = fetch(fRAD)
# fetch(@spawnat 2 GC.gc())
# fetch(@spawnat 3 GC.gc())
# GC.gc()
# GC.gc()
# ## Processing
# @info "Beginning processing"
# @. SHF = g/(1*Dryair.cp*Ts)*(SHF)
# @. LHF = g/(1*Dryair.cp*Ts)*(epsilon*Dryair.cp*Ts/Liquidwater.Lv*LHF)
# @. SHF += LHF # Now it is transformed
# ThetaV = similar(T)
# xBar_Pt = Array{eltype(T),4}(undef,1,1,size(PP,3),size(PP,4))
# xBar_Tv = Array{eltype(T),4}(undef,1,1,size(Tv,3),size(Tv,4))
# xBar_ThetaV = Array{eltype(T),4}(undef,1,1,size(ThetaV,3),size(ThetaV,4))
# #******
# dx = x[2]-x[1]
# dy = y[2]-y[1] # Grid size
# kz = length(z) # vertical levels
# kx = length(x) # # of horizonal grid points
# ky = length(y) # # of horizonal grid points
# @. RAD = RAD/day # K/s #Heating rate per second;
# @. Tv = (1 + 1e-3*epsilon*Tv)*T # Virtual temperature
# @. P0 = P0*1e2
# PP .= PP .+ reshape(P0,(1,1,kz,1))
# #ThetaV .= Tv.*(PP./reshape(P0,(1,1,kz,1))).^c1 # Virtual potential temp
# ThetaV .= Tv.*(PP/P0[1]).^c1 # Virtual potential temp
# mean!(xBar_Pt,PP)
# mean!(xBar_Tv,Tv)
# mean!(xBar_ThetaV,ThetaV)
# var_Tv = Tv .- xBar_Tv
# var_ThetaV = ThetaV .- xBar_ThetaV
# rho0 = dropdims(xBar_Pt./R./xBar_Tv,dims=(1,2))
# B = g .* var_ThetaV./xBar_ThetaV
# @. RAD = RAD*(g/xBar_Tv) # convert unit to buoyancy/s
# N2 = compute_N2(xBar_Tv,z)
# # PP = []
# # T = []
# # ThetaV = []
# # Tv = []
# # var_ThetaV = []
# # var_Tv = []
# ### NOTE that SHF is now the sum, saving memory
# # Buoyancy budget
# dz = 50
# #@info size(B), size(RAD), size(SHF), size(U),size(V) ,size(W), size(N2), size(dx),size(dy), size(dz), size(dt), size(x),size(y), size(z), size(t)
# @info "Computing buoyancy"
# Diabatic_other = fetch(@spawnat 3 get_diabatic_as_residual_buoyancy(B, RAD, SHF, U,V ,W, N2, dx,dy, dz, dt, x,y, z, t))
# fetch(@spawnat 3 GC.gc())
# # APE budget
# z_up = 15000
# z_BL = 2000
# @info "Computing APE budget"
# (int_mass,
# int_KE,
# int_APE,
# int_APE_rate,
# int_APE_Ub2,
# int_APE_Vb2,
# int_APE_WN2,
# int_APE_RAD,
# int_APE_DIA,
# xBar_APE_Fs,
# residual) = fetch( @spawnat 3 getapebudget(B, U,V, W, N2, RAD, SHF, Diabatic_other, rho0, x,y, z, t, dx,dy, dz, dt, z_up))
# fetch(@spawnat 3 GC.gc())
# Diabatic_other .= Diabatic_other .- mean(Diabatic_other,dims=(1,2)) #They are now perturbations
# RAD .= RAD .- mean(RAD,dims=(1,2))
# B .= B .- mean(B,dims=(1,2))
# dia_ape = Diabatic_other.*B
# rad_ape = RAD.*B
# # plot([int_APE_rate,
# # int_APE_Ub2,
# # int_APE_Vb2,
# # int_APE_WN2,
# # int_APE_RAD,
# # int_APE_DIA,
# # xBar_APE_Fs],label=["rate" "advu" "advv" "wn2" "rad" "dia" "fs" ])
# # plot!(residual,lw=4,label="residual")
# return true
# end
# @test testAPEBudget_distributed()
# """
# run_distributed_test()
# Don't forget to first run: \n
# addprocs(2) \n
# @everywhere using Pkg \n
# @everywhere Pkg.activate("/users/arreyes/Documents/Research/Developement/AvailablePotentialEnergyFramework/") \n
# @everywhere using AvailablePotentialEnergyFramework \n
# """
# function run_distributed_test()
# @info """ Don't forget to first run: \n
# addprocs(2) \n
# @everywhere using Pkg \n
# @everywhere Pkg.activate("/users/arreyes/Documents/Research/Developement/AvailablePotentialEnergyFramework/") \n
# @everywhere using AvailablePotentialEnergyFramework \n
# """
# for i in 1:10
# @info "Running ape budget, this is the $i iteration"
# testAPEBudget_distributed()
# end
# end
| AvailablePotentialEnergyFramework | https://github.com/aramirezreyes/AvailablePotentialEnergyFramework.jl.git |
|
[
"MIT"
] | 0.1.0 | 1c56c6aa813337249e142d227a661747b0d287cd | code | 4169 | ### This will have profile tests for the ape budgets
@testset "APE budgets" begin
sx,sy,sz,st = 100,100,100,10
x = y = range(0.0,stop=10.0,length = sx);
z = range(0.0,stop=500.0,length = sz);
t = range(0.0,stop=50.0,length = st);
dx = dy = x[2] - x[1];
dz = z[2] - z[1];
dt = t[2] - t[1];
zero_profile = zeros(sx,sy,sz,st);
one_profile = ones(sx,sy,sz,st);
test_brunt = 1e-4*ones(sz,st)
test_rho = ones(1,1,sz,st)
test_surface = zeros(sx,sy,st)
@test getapebudget(zero_profile,zero_profile,zero_profile,zero_profile,test_brunt,
zero_profile,test_surface,zero_profile,test_rho,x,y,z,t,dx,dy,dz,dt,50.0)[3] β zeros(st)
### If B is constant in space, advection should be zero
B = 5ones(sx,sy,sz,st);
for i in 1:st
for j in 1:sy
for k in 1:sx
B[j,k,:,i] .= B[j,k,:,i] .* sin(2pi/st*i)
end
end
end
RAD_b = 2ones(sx,sy,sz,st);
Fs = 2ones(sx,sy,st);
U = 10ones(sx,sy,sz,st);
V = 10ones(sx,sy,sz,st);
W = 10ones(sx,sy,sz,st);
N2 = 1e-4 .* ones(1,1,sz,st);
rho0 = ones(1,1,sz,st);
diabatic_other = get_diabatic_as_residual_buoyancy(B, RAD_b, Fs, U,V, W, N2, dx,dy, dz, dt);
a = getapebudget(B,U,V,W,N2,RAD_b,Fs,diabatic_other, rho0, x,y,z,t,dx,dy,dz,dt,500)
@test a[4] β -(a[7] - a[8] - a[9] - a[10])
### Budget must be closed when we have Advection, Rad, and Surface fluxes
B = 5ones(sx,sy,sz,st);
for i in 1:st
for j in 1:sy
for k in 1:sx
B[j,k,:,i] .= B[j,k,:,i] .* sin(2pi/st*i)*sin(2pi/sx*k)*sin(2pi/sy*j)
end
end
end
RAD_b = 2ones(sx,sy,sz,st);
Fs = 2ones(sx,sy,st);
U = 10rand(sx,sy,sz,st);
V = 10rand(sx,sy,sz,st);
W = 5rand(sx,sy,sz,st);
N2 = 1e-4 .* ones(1,1,sz,st);
rho0 = ones(1,1,sz,st);
diabatic_other = get_diabatic_as_residual_buoyancy(B, RAD_b, Fs, U,V, W, N2, dx,dy, dz, dt);
a = getapebudget(B,U,V,W,N2,RAD_b,Fs,diabatic_other, rho0, x,y,z,t,dx,dy,dz,dt,500)
@test a[4] β -(a[5] + a[6] + a[7] - a[8] - a[9] - a[10])
### Budget must be closed when we have Advection, Rad, and zero surface fluxes
B = 5ones(sx,sy,sz,st);
for i in 1:st
for j in 1:sy
for k in 1:sx
B[j,k,:,i] .= B[j,k,:,i] .* sin(2pi/st*i)*sin(2pi/sx*k)*sin(2pi/sy*j)
end
end
end
RAD_b = 2ones(sx,sy,sz,st);
Fs = zeros(sx,sy,st);
U = 10rand(sx,sy,sz,st);
V = 10rand(sx,sy,sz,st);
W = 5rand(sx,sy,sz,st);
N2 = 1e-4 .* ones(1,1,sz,st);
rho0 = ones(1,1,sz,st);
diabatic_other = get_diabatic_as_residual_buoyancy(B, RAD_b, Fs, U,V, W, N2, dx,dy, dz, dt);
a = getapebudget(B,U,V,W,N2,RAD_b,Fs,diabatic_other, rho0, x,y,z,t,dx,dy,dz,dt,500)
@test a[4] β -(a[5] + a[6] + a[7] - a[8] - a[9] )
### Budget must be closed when we have Advection, zero Rad, and finite surface fluxes
B = 5ones(sx,sy,sz,st);
for i in 1:st
for j in 1:sy
for k in 1:sx
B[j,k,:,i] .= B[j,k,:,i] .* sin(2pi/st*i)*sin(2pi/sx*k)*sin(2pi/sy*j)
end
end
end
RAD_b = zeros(sx,sy,sz,st);
Fs = 2ones(sx,sy,st);
U = 10rand(sx,sy,sz,st);
V = 10rand(sx,sy,sz,st);
W = 5rand(sx,sy,sz,st);
N2 = 1e-4 .* ones(1,1,sz,st);
rho0 = ones(1,1,sz,st);
diabatic_other = get_diabatic_as_residual_buoyancy(B, RAD_b, Fs, U,V, W, N2, dx,dy, dz, dt);
a = getapebudget(B,U,V,W,N2,RAD_b,Fs,diabatic_other, rho0, x,y,z,t,dx,dy,dz,dt,500)
@test a[4] β -(a[5] + a[6] + a[7] - a[9] - a[10])
# labels = ["mass" "KE" "APE" "βtAPE" "AdvecU" "AdvecV" "Conversion" "Rad" "Convec" "Surf" "Res"]
# mycolors = distinguishable_colors(11) #NEEDS COLORS.JL
# fig = Figure(resolution = (1200, 700), backgroundcolor = RGBf0(0.98, 0.98, 0.98),title = "APE budget closure test")
# ax1 = fig[1,1] = Axis(fig)
# ylims!(ax1,extrema(reduce(vcat,a[4:11])))
# for i in 4:11
# lines!(ax1,a[i], label = labels[i], color = mycolors[i], linewidth = 5)
# end
# axislegend(ax1)
# fig
end
| AvailablePotentialEnergyFramework | https://github.com/aramirezreyes/AvailablePotentialEnergyFramework.jl.git |
|
[
"MIT"
] | 0.1.0 | 1c56c6aa813337249e142d227a661747b0d287cd | code | 2465 | using AvailablePotentialEnergyFramework, ImageFiltering, Test
@testset "Array filtering" begin
@testset "Kernel creation" begin
@test AvailablePotentialEnergyFramework.kernel_4d(false,false) ==
ImageFiltering.kernelfactors((centered([(1.0)]),centered([(1.0)]),centered([(1.0)]),centered([(1.0)]) ))
@test AvailablePotentialEnergyFramework.kernel_4d(5,false) ==
ImageFiltering.kernelfactors((centered(ones(5)./5.0),centered(ones(5)./5.0),centered([(1.0)]),centered([(1.0)]) ))
end
@testset "Filter arrays 1 and 2" begin
@testset "Filter_array!" begin
ones3d = ones(20,20,20)
ones4d = ones(20,20,20,20)
zeros3d = zeros(20,20,20)
zeros4d = zeros(20,20,20,20)
buf3d = similar(ones3d)
buf4d = similar(ones4d)
filter_array!(buf3d,zeros3d,5,5,1)
filter_array!(buf3d,ones3d,5,5,1)
filter_array!(buf4d,zeros4d,5,5,1)
filter_array!(buf4d,ones4d,5,5,1)
@test ones3d == ones(20,20,20)
@test zeros3d == zeros(20,20,20)
@test ones4d == ones(20,20,20,20)
@test zeros4d == zeros(20,20,20,20)
end
@testset "Filter_array_2" begin
zeros3d = zeros(20,20,20);
ones3d = ones(20,20,20)
zeros4d = zeros(20,20,20,20);
ones4d = ones(20,20,20,20)
filter_array_2!(ones3d,5,5,1)
filter_array_2!(zeros3d,5,5,1)
filter_array_2!(ones4d,5,5,1)
filter_array_2!(zeros4d,5,5,1)
@test ones3d == ones(20,20,20)
@test zeros3d == zeros(20,20,20)
@test ones4d == ones(20,20,20,20)
@test zeros4d == zeros(20,20,20,20)
end
@testset "Both filters simultaneously" begin
arr3d = rand(20,20,20)
arr3d2 = copy(arr3d)
arr3d3 = copy(arr3d)
arr4d = rand(20,20,20,20)
arr4d2 = copy(arr4d)
arr4d3 = copy(arr4d)
buf3d = similar(arr3d)
buf4d = similar(arr4d)
filter_array_2!(arr3d2,5,5,1)
filter_array_2!(arr4d2,5,5,1)
filter_array!(buf3d,arr3d3,5,5,1)
filter_array!(buf4d,arr4d3,5,5,1)
@test arr3d3 == arr3d2
@test arr4d3 == arr4d2
end
end
end
| AvailablePotentialEnergyFramework | https://github.com/aramirezreyes/AvailablePotentialEnergyFramework.jl.git |
|
[
"MIT"
] | 0.1.0 | 1c56c6aa813337249e142d227a661747b0d287cd | code | 4678 |
@testset "Composite helper functions" begin
x1 = origin = (0,0)
x2 = (1,1)
x3 = (0,4)
x4 = (0,2)
binlimits = (0,2)
gridspacing = 1
@test AvailablePotentialEnergyFramework.distance(x1,x2,gridspacing) == AvailablePotentialEnergyFramework.distance(x2,x1,gridspacing) == sqrt(2)
@test AvailablePotentialEnergyFramework.isindexindistancebin(binlimits,x2,x1) == true
@test AvailablePotentialEnergyFramework.isindexindistancebin(binlimits,x3,x1) == false
@test AvailablePotentialEnergyFramework.isindexindistancebin(binlimits,x4,x1) == true
array_allindistance_2d = let
array_allindistance_2d = zeros(100,100)
for i in 1:100, j in 1:100
array_allindistance_2d[i,j] = hypot(i,j)
end
array_allindistance_2d
end
@test isapprox(AvailablePotentialEnergyFramework.averageallindistance((99,100),array_allindistance_2d,origin), 100; rtol = 1)
array_allindistance_3d = let
array_allindistance_3d = zeros(100,100,10)
for i in 1:100, j in 1:100
array_allindistance_3d[i,j,:] .= hypot(i,j)
end
array_allindistance_3d
end
#@test isapprox.(AvailablePotentialEnergyFramework.averageallindistance((99,100),array_allindistance_3d,origin), 100; rtol = 1) == fill(true,10)
@test all(isapprox.(AvailablePotentialEnergyFramework.velocity_topolar(10,0,x4,origin),(0,10),atol=0.1))
@test all(isapprox.(AvailablePotentialEnergyFramework.velocity_topolar(-10,0,x4,origin),(0,-10),atol=0.1))
@test all(isapprox.(AvailablePotentialEnergyFramework.velocity_topolar(0,10,x4,origin),(10,0),atol=0.1))
@test all(isapprox.(AvailablePotentialEnergyFramework.velocity_topolar(0,-10,x4,origin),(-10,0),atol=0.1))
end
@testset "Cyclone detection tools" begin
psfc = Dataset(joinpath(@__DIR__,"testfiles/test_composite_PSFC_TABS.nc")) do ds
variable(ds, "PSFC")[:,:,:]
end
psfc = cat(psfc,fill(mean(psfc) ,size(psfc)) + 5rand(size(psfc)...),dims=3)
TABS = Dataset(joinpath(@__DIR__,"testfiles/test_composite_PSFC_TABS.nc")) do ds
variable(ds, "TABS")[:,:,:,:]
end
TABS = cat(TABS,fill(mean(TABS) ,size(TABS)) + 1rand(size(TABS)...),dims=3)
@testset "Detect centers" begin
pressure_anomaly = psfc .- mean(psfc,dims=(1,2))
centerstest = AvailablePotentialEnergyFramework.findcyclonecenters(AvailablePotentialEnergyFramework.PressureMinima(),pressure_anomaly[:,:,1],-5)
@test length(centerstest) == 5
centers_and_labels,cyclones = detect_cyclones(AvailablePotentialEnergyFramework.PressureMinima(),pressure_anomaly[:,:,1],-5,2000)
@test length(centers_and_labels) == 6
@test all([cyclones.segment_pixel_count[key] > 1 for key in keys(cyclones.segment_pixel_count)])
(cyclonecount_2d,addition_2d) = AvailablePotentialEnergyFramework.add_allcyclones(psfc[:,:,1],cyclones,centers_and_labels; maskcyclones = false)
@test cyclonecount_2d == 3
(cyclonecount_2d,addition_2d) = AvailablePotentialEnergyFramework.add_allcyclones(psfc[:,:,1],cyclones,centers_and_labels; maskcyclones = true)
@test cyclonecount_2d == 3
(cyclonecount_3d,addition_3d) = AvailablePotentialEnergyFramework.add_allcyclones(TABS[:,:,:,1],cyclones,centers_and_labels; maskcyclones = false)
@test cyclonecount_3d == 3
(cyclonecount_3d,addition_3d) = AvailablePotentialEnergyFramework.add_allcyclones(TABS[:,:,:,1],cyclones,centers_and_labels; maskcyclones = true)
@test cyclonecount_3d == 3
binlimits = 0:2000:300000
bins = [(binlimits[ind] , binlimits[ind + 1]) for ind in 1:(length(binlimits) - 1) ]
@test_nowarn [averageallindistance(bin,addition_2d,(128,128),4000) for bin in bins]
@test_nowarn [averageallindistance(bin,addition_3d,(128,128),4000) for bin in bins]
### Try and add two frames, one with and one without TC
centers_labels_and_cyclones = [detect_cyclones(AvailablePotentialEnergyFramework.PressureMinima(),pressure_anomaly[:,:,i],-5,2000) for i in 1:2]
@test 3 == begin
totalcyclonecount = 0
for timeindex in 1:2
centers_and_labels,cyclones = centers_labels_and_cyclones[timeindex]
if !isnothing(centers_and_labels)
count, _ = AvailablePotentialEnergyFramework.add_allcyclones(TABS[:,:,timeindex],cyclones,centers_and_labels;maskcyclones = false)
totalcyclonecount += count
end
end
totalcyclonecount
end
end
end
| AvailablePotentialEnergyFramework | https://github.com/aramirezreyes/AvailablePotentialEnergyFramework.jl.git |
|
[
"MIT"
] | 0.1.0 | 1c56c6aa813337249e142d227a661747b0d287cd | code | 636 |
# Synthetic pressure-perturbation field containing known local minima ("cyclones").
cyclonefield = zeros(Float64, 20, 20)
cyclonefield[2, 1] = -2           # minimum at the domain edge (not expected to be reported)
cyclonefield[8:10, 5:7] .= -1     # first extended depression...
cyclonefield[9, 6] = -2           # ...with its center
cyclonefield[17:19, 17:19] .= -1  # second extended depression...
cyclonefield[18, 18] = -2         # ...with its center
## Fake pressure perturbation
@testset "Cyclone detection tools" begin
    @testset "Detect centers" begin
        # With threshold 0, exactly the two interior minima must be found, in order.
        found = AvailablePotentialEnergyFramework.findcyclonecenters_aspressureminima(cyclonefield, 0)
        @test length(found) == 2
        @test found[1] == CartesianIndex(9, 6)
        @test found[2] == CartesianIndex(18, 18)
    end
end
|
[
"MIT"
] | 0.1.0 | 1c56c6aa813337249e142d227a661747b0d287cd | code | 4933 | @testset "Brunt-Vaisala" begin
# Lapse rates: g/cp is the dry-adiabatic (neutral) rate; offsetting it by
# ±10 K/km yields statically stable / unstable profiles.
Ξneutral = AvailablePotentialEnergyFramework.g/AvailablePotentialEnergyFramework.Dryair.cp
Ξstable = Ξneutral - 10e-3u"K/m"
Ξunstable = Ξneutral + 10e-3u"K/m"
z = 1u"m" .* collect(0:50:10e3)
# Synthetic (1,1,nz,2) virtual-temperature profile T(z) = 300 - Ξ·z repeated over 2 time steps,
# with a unitless and a Unitful method.
tvprofile(Ξ,z) = reshape(repeat(300 .- Ξ*z,1,2),1,1,length(z),2)
tvprofile(Ξ :: Quantity, z :: Array{ <: Quantity}) = reshape(repeat(300u"K" .- Ξ*z,1,2),1,1,length(z),2)
# Debug helper; needs a plotting package in scope and is not exercised by the tests.
function plot_N2(Ξ,z)
tv_profile = tvprofile(Ξ,z)
# FIX: was `profile1[1,1,:,1]` — `profile1` is undefined; plot the profile just built.
p1 = plot(tv_profile[1,1,:,1],z,title = "Tv profile, Ξ = $Ξ")
p2 = plot(AvailablePotentialEnergyFramework.compute_N2_attempt(tv_profile,z),z, title = "N2")
plot(p1,p2)
end
# N² must be ~0 for neutral, < 0 for unstable and > 0 for stable stratification,
# both with Unitful quantities and with plain numbers.
@test isapprox( 1u"1/s/s" .* zeros(length(z),2),compute_N2(tvprofile(Ξneutral,z),z), atol=1e-10u"1/s/s")
@test all(compute_N2(tvprofile(Ξunstable,z),z) .< 0u"1/s/s" )
@test all(compute_N2(tvprofile(Ξstable,z),z) .> 0u"1/s/s" )
@test isapprox( zeros(length(z),2),compute_N2(tvprofile(ustrip(Ξneutral),ustrip.(z)),ustrip.(z)), atol=1e-10)
@test all(compute_N2(tvprofile(ustrip(Ξunstable),ustrip.(z)),ustrip.(z)) .< 0 )
@test all(compute_N2(tvprofile(ustrip(Ξstable),ustrip.(z)),ustrip.(z)) .> 0 )
end
@testset "Thermodynamics" begin
@test get_saturation_vapor_pressure(273.15u"K") == 6.112u"hPa"
@test get_partial_vapor_pressure(0,1000u"hPa") == 0u"hPa"
@test get_partial_vapor_pressure(1,1000u"hPa") == 1000u"hPa"/(18.016/28.966 + 1.0)
@test get_mixing_ratio(0u"hPa",1000u"hPa") == 0
@test get_mixing_ratio(get_partial_vapor_pressure(0.5,1000.0), 1000.0) == 0.5
@test get_virtual_temperature(300u"K",0,0) == 300u"K"
@test specific_humidity_to_mixing_ratio(mixing_ratio_to_specific_humidity(0.5)) β 0.5
@test mixing_ratio_to_specific_humidity(specific_humidity_to_mixing_ratio(0.5)) β 0.5
@test unit(get_specific_entropy(300u"K",0.2,1000u"hPa"))== u"J/K/kg"
@test get_potential_temperature(300u"K",1000u"hPa",1000u"hPa") == 300u"K"
@test get_potential_temperature(300u"K",1010u"hPa",1000u"hPa") < 300u"K"
@test get_potential_temperature(300u"K",900u"hPa",1000u"hPa") > 300u"K"
@test get_virtual_temperature(300u"K",0u"g/kg") == 300u"K"
@test get_virtual_temperature(300u"K",0u"g/kg") == 300u"K"
@test get_virtual_temperature(300u"K",10u"g/kg") > 300u"K"
@test 1unit(surface_sensible_heat_flux_to_buoyancy(300u"K", 100u"W/m^2")) == 1unit(g)/u"s"*u"m"
@test 1unit(surface_latent_heat_flux_to_buoyancy(300u"K", 100u"W/m^2")) == 1unit(g)/u"s"*u"m"
@test 1unit(get_buoyancy(0.5u"K",10.5u"K")) == 1unit(g)
@test 1unit(radiative_heating_rate_to_buoyancy(10u"K",5u"K/s")) == 1u"m/s^2/s"
@test 1unit(get_density_ideal_gas(10.0u"Pa",5.0u"K")) == 1u"kg/m^3"
# No units
@test get_saturation_vapor_pressure(273.15) == 6.112
@test get_partial_vapor_pressure(0,1000) == 0
@test get_partial_vapor_pressure(1,1000) == 1000/(18.016/28.966 + 1.0)
@test get_mixing_ratio(0,1000) == 0
@test get_potential_temperature(300,1000,1000) == 300
@test get_potential_temperature(300,1010,1000) < 300
@test get_potential_temperature(300,900,1000) > 300
@test get_virtual_temperature(300,0) == 300
@test get_virtual_temperature(300,0) == 300
@test get_virtual_temperature(300,10) > 300
@test ustrip(surface_sensible_heat_flux_to_buoyancy(300u"K", 100u"W/m^2")) == surface_sensible_heat_flux_to_buoyancy(300, 100)
@test ustrip(surface_latent_heat_flux_to_buoyancy(300u"K", 100u"W/m^2")) == surface_latent_heat_flux_to_buoyancy(300, 100)
@test ustrip(get_buoyancy(0.5u"K",10.5u"K")) == get_buoyancy(0.5,10.5)
@test ustrip(radiative_heating_rate_to_buoyancy(10u"K",5u"K/s")) == ustrip(radiative_heating_rate_to_buoyancy(10,5))
@test ustrip(get_density_ideal_gas(10.0u"Pa",5.0u"K")) == get_density_ideal_gas(10.0,5.0)
pres = Dataset(joinpath(@__DIR__,"testfiles/thermoprofile.nc")) do ds
1u"hPa" .* variable(ds, "PRES")[:,:]
end
tabs = Dataset(joinpath(@__DIR__,"testfiles/thermoprofile.nc")) do ds
1u"K" .* variable(ds, "TABS")[:,:]
end
qv = 1e-3u"kg/g" .* 1u"g/kg" .* Dataset(joinpath(@__DIR__,"testfiles/thermoprofile.nc")) do ds
variable(ds, "QV")[:,:] #was originally in g/kg
end
@info size(pres) size(qv) size(tabs)
r = specific_humidity_to_mixing_ratio.(qv)
timeindex = 1200
pparcel = pres[1,timeindex]
tparcel = tabs[1,timeindex]
rparcel = r[1,timeindex]
#I will create a similar profile but with a perturbation to see what happens
tabs_unstable = copy(tabs)
tabs_unstable[2:40,:] .- 7.0u"K"
tabs_unstable[41:end,:] .+ 7.0u"K"
@test_broken unit(get_buoyancy_of_lifted_parcel(tparcel,rparcel,pparcel,tabs[:,timeindex],r[:,timeindex],pres[:,timeindex])[1]) == u"K"
@test_broken get_buoyancy_of_lifted_parcel(tparcel,rparcel,pparcel,tabs_unstable[:,timeindex],r[:,timeindex],pres[:,timeindex])
end
| AvailablePotentialEnergyFramework | https://github.com/aramirezreyes/AvailablePotentialEnergyFramework.jl.git |
|
[
"MIT"
] | 0.1.0 | 1c56c6aa813337249e142d227a661747b0d287cd | code | 949 | using AvailablePotentialEnergyFramework
using Test
using NCDatasets: Dataset, variable
using Statistics: mean
using Unitful: @u_str, unit, ustrip, Quantity
# NOTE(review): the commented block below is stale scratch for integrate_vertically
# (some lines are not even syntactically valid); consider deleting or reviving it.
# @test integrate_vertically(1:10,dz=2) == 110
# @test integrate_vertically(1:10,dz=2,weight=2) == 220
# testmat_2d= repeat(1:10,1,10)
# testweights_1d = reverse(1:10)
# testweights_2d = (1:10)'.*repeat(reverse(1:10),1,10)
# @test integrate_vertically(testmat_2d) == reshape(10*(1:10),10,1)
# @test integrate_vertically(testmat_2d,weight=testweights_1d) == reshape(10*(1:10)).*reverse((1:10),10,1)
# @test integrate_vertically(testmat_2d,weight=testweights_2d) == reshape(55*(1:10)).*reverse(1:10),10,1)
# Top-level test entry point: each include runs one themed test file.
@testset "AvailablePotentialEnergyFramework" begin
include("compositehelperfunctions.jl")
include("physicsfunctions.jl")
include("useful_diagnostics.jl")
include("apebudgets.jl")
# include("ape_computation_from_julia_output.jl")
#include("arrayfiltering.jl")
end
|
[
"MIT"
] | 0.1.0 | 1c56c6aa813337249e142d227a661747b0d287cd | code | 516 |
@testset "Diagnostic utilities" begin
pw = 200rand(100,100,100)
precipitation = 3pw
max_precip_bin = 200
binspacing = 0.25
pw_bins = 0:binspacing:max_precip_bin
average_precip_per_bin_da = AvailablePotentialEnergyFramework.average_precipitation_per_pw_bin_dayang(pw,precipitation,max_precip_bin,binspacing)
probabilities,average_precip_per_bin = AvailablePotentialEnergyFramework.average_precipitation_per_pw_bin(pw,precipitation,pw_bins,binspacing)
@test average_precip_per_bin β average_precip_per_bin_da
end | AvailablePotentialEnergyFramework | https://github.com/aramirezreyes/AvailablePotentialEnergyFramework.jl.git |
|
[
"MIT"
] | 0.1.0 | 1c56c6aa813337249e142d227a661747b0d287cd | code | 363 | ####
####
#### Coverage summary, printed as "(percentage) covered".
####
#### Useful for CI environments that just want a summary (eg a Gitlab setup).
####
using Coverage

# Run from the repository root: this script lives two directories down.
cd(joinpath(@__DIR__, "..", "..")) do
    covered, total = get_summary(process_folder())
    pct = covered / total * 100
    println("($(pct)%) covered")
end
|
[
"MIT"
] | 0.1.0 | 1c56c6aa813337249e142d227a661747b0d287cd | code | 266 | # only push coverage from one bot
get(ENV, "TRAVIS_OS_NAME", nothing) == "linux" || exit(0)
get(ENV, "TRAVIS_JULIA_VERSION", nothing) == "1.0" || exit(0)
using Coverage
cd(joinpath(@__DIR__, "..", "..")) do
Codecov.submit(Codecov.process_folder())
end
| AvailablePotentialEnergyFramework | https://github.com/aramirezreyes/AvailablePotentialEnergyFramework.jl.git |
|
[
"MIT"
] | 0.1.0 | 1c56c6aa813337249e142d227a661747b0d287cd | docs | 1503 | # AvailablePotentialEnergyFramework.jl
<!--




 -->
[](http://codecov.io/github/aramirezreyes/AvailablePotentialEnergyFramework.jl?branch=dev)
A module to work in the APE framework using outputs from the System for Atmospheric Modeling (SAM) 3d
As a part of my PhD path, I am doing experiments with the System for Atmospheric Modeling (SAM32, Khairoutdinov, M. F., and D.A. Randall, 2001: A cloud resolving model as a cloud parameterization in the NCAR Community Climate System Model: Preliminary Results. Geophys. Res. Lett., 28, 3617-3620)
I am currently working on analzing idealized experiments from an Available Potential Energy (APE) framework, focusing specially on the cyclogenesis process.
This, naturally, establishes the kind of functionality I am implementing on these lines.
This is an attempt to order my scripting with the hope that it will be useful to other people wile forcing me to keep good developing practices.
Argel Ramirez Reyes
| AvailablePotentialEnergyFramework | https://github.com/aramirezreyes/AvailablePotentialEnergyFramework.jl.git |
|
[
"MIT"
] | 0.1.0 | 1c56c6aa813337249e142d227a661747b0d287cd | docs | 39 | # SAMtools
*Documentation goes here.*
| AvailablePotentialEnergyFramework | https://github.com/aramirezreyes/AvailablePotentialEnergyFramework.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 1318 | # You need to add the Distributed package as a dependency in order for this to run
# Distributed variant of the ping-pong throughput benchmark: one Scheduler with
# one Pinger per worker process, monitored by a Coordinator on the master.
@everywhere using Pkg
@everywhere Pkg.activate(".")
@everywhere include("benchmark/maxthroughputbase.jl")
# Create schedulers and pingers on worker processes
@sync @distributed for i = 1:nworkers()
global schedulers
global pingers
ps = [Pinger(nothing, emptycore(ctx)) for i=1:1]
scheduler = Scheduler(ctx, ps) # A single scheduler per process
for pinger in ps
send(scheduler, addr(pinger), CreatePeer())
push!(pingers, addr(pinger))
end
push!(schedulers, scheduler)
end
# Collect pingers from worker processes
# (worker ids are 2..nworkers()+1; id 1 is the master)
all_pingers = []
for i = 2:nworkers() + 1
global all_pingers
push!(all_pingers, fetch(@spawnat i pingers)...)
end
# Run remote schedulers
@distributed for i = 1:nworkers()
for scheduler in schedulers
scheduler(; remote = true)
end
end
# Wait Rudimentarily for them to start
sleep(8)
# Create and run the coordinator on the master process
coordinator = Coordinator(all_pingers)
scheduler = Scheduler(ctx, [coordinator])
spawn(scheduler, coordinator)
# TODO Check this if it's run correctly
scheduler(; remote = true) # blocks here while the benchmark runs
# Stop remote schedulers
@distributed for i = 1:nworkers()
for scheduler in schedulers
shutdown!(scheduler)
end
end
;
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 1769 | include("pingpongbase.jl")
# Actor that polls every Pinger for its message counters and prints throughput.
mutable struct Coordinator <: Actor{Any}
pingeraddrs::Vector{Addr}                    # pingers to monitor
results::Dict{ActorId, Vector{PerfReading}}  # readings collected per pinger
gotreads::Int                                # responses received in the current round
core
Coordinator(pingeraddrs) = begin
results = Dict{ActorId, Vector{PerfReading}}(box(a) => PerfReading[] for a in pingeraddrs)
return new(pingeraddrs, results, 0)
end
end
CircoCore.onmessage(me::Coordinator, ::OnSpawn, service) = begin
@info "Monitoring $(length(me.pingeraddrs)) pingers."
sendreqs(me, service)
end
# Start a new polling round: reset the response counter and query every pinger.
function sendreqs(me::Coordinator, service)
me.gotreads = 0
for p in me.pingeraddrs
send(service, me, p, ReadPerf(addr(me)))
end
end
# Collect one reading; when the round is complete, print it and schedule the next.
CircoCore.onmessage(me::Coordinator, r::PerfReading, service) = begin
push!(me.results[r.reporter], r)
me.gotreads += 1
if me.gotreads == length(me.pingeraddrs)
printlastresults(me)
settimeout(service, me, 1.0)
end
end
CircoCore.onmessage(me::Coordinator, ::Timeout, service) = begin
sendreqs(me, service)
end
# Print the per-pinger message rate over the last polling interval, then an
# aggregate rate estimate across all pingers.
function printlastresults(c::Coordinator)
    println()
    msgs_sum = 0
    time_sum = 0.0
    for readings in values(c.results)
        # Need at least two readings to form a difference.
        length(readings) > 1 || continue
        cur = readings[end]
        prev = readings[end - 1]
        msgs = cur.pings_sent + cur.pongs_got - prev.pings_sent - prev.pongs_got
        elapsed = cur.timestamp - prev.timestamp
        msgs_sum += msgs
        time_sum += elapsed
        print("$(msgs / elapsed), ")
    end
    if time_sum > 0
        println("\nTotal: $(msgs_sum / time_sum * length(c.pingeraddrs))")
    end
end
# Per-process registries filled by the distributed driver script.
schedulers = []
pingers = []
# Minimal-profile context with only a PostOffice plugin (inter-node messaging).
ctx = CircoContext(;
profile=CircoCore.Profiles.MinimalProfile(),
userpluginsfn=() -> [CircoCore.PostOffice()]
)
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 1403 | # SPDX-License-Identifier: MPL-2.0
using CircoCore
# ANCHOR Ping-Pong
# A Pinger and its Ponger peer bounce Ping/Pong messages forever, counting traffic.
mutable struct Pinger{TCore} <: Actor{TCore}
peer::Union{Addr, Nothing}
pings_sent::Int64
pongs_got::Int64
core::TCore
Pinger(peer, core) = new{typeof(core)}(peer, 0, 0, core)
end
mutable struct Ponger{TCore} <: Actor{TCore}
peer::Addr
core::TCore
Ponger(peer, core) = new{typeof(core)}(peer, core)
end
struct Ping end
struct Pong end
struct CreatePeer end
function sendping(service, me::Pinger)
send(service, me, me.peer, Ping())
me.pings_sent += 1
end
function sendpong(service, me::Ponger)
send(service, me, me.peer, Pong())
end
# CreatePeer spawns the Ponger counterpart and kicks off the exchange.
CircoCore.onmessage(me::Pinger, ::CreatePeer, service) = begin
peer = Ponger(addr(me), emptycore(service))
me.peer = spawn(service, peer)
sendping(service, me)
end
CircoCore.onmessage(me::Ponger, ::Ping, service) = sendpong(service, me)
CircoCore.onmessage(me::Pinger, ::Pong, service) = begin
me.pongs_got += 1
sendping(service, me)
return nothing
end
# ReadPerf requests a PerfReading snapshot of the pinger's counters.
struct ReadPerf
requestor::Addr
end
struct PerfReading
reporter::ActorId
pings_sent::Int
pongs_got::Int
timestamp::Float64  # wall-clock seconds (Libc.time())
end
PerfReading() = PerfReading(0, 0, 0, Libc.time())
CircoCore.onmessage(me::Pinger, req::ReadPerf, service) = begin
send(service, me, req.requestor, PerfReading(box(me), me.pings_sent, me.pongs_got, Libc.time()))
end
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 2727 | # SPDX-License-Identifier: MPL-2.0
using CircoCore
# Fixed number of round trips before the actors unschedule themselves.
const MSG_COUNT = 50_000_000
# Self-contained ping-pong pair for single-scheduler throughput profiling.
mutable struct PingPonger{TCore} <: Actor{TCore}
peer::Union{Addr, Nothing}
pings_sent::Int64
pongs_got::Int64
core::TCore
end
PingPonger(peer, core) = PingPonger(peer, 0, 0, core)
struct Ping end
struct Pong end
struct CreatePeer end
function sendping(service, me::PingPonger)
send(service, me, me.peer, Ping())
me.pings_sent += 1
if me.pings_sent == MSG_COUNT
die(service, me)
end
end
function sendpong(service, me::PingPonger)
send(service, me, me.peer, Pong())
end
# Generate 1000 extra onmessage methods to stress dynamic dispatch in the scheduler.
for i=1:1000
typename = Symbol("XX$i")
eval(quote
struct $typename end
CircoCore.onmessage(me::PingPonger, message::$typename, service::Service) = $i
end)
end
function CircoCore.onmessage(me::PingPonger, message::CreatePeer, service)
peer = PingPonger(addr(me), emptycore(service))
me.peer = spawn(service, peer)
sendping(service, me)
end
function CircoCore.onmessage(me::PingPonger, ::Ping, service)
sendpong(service, me)
return nothing
end
function CircoCore.onmessage(me::PingPonger, ::Pong, service)
me.pongs_got += 1
if me.pongs_got == MSG_COUNT
die(service, me)
else
sendping(service, me)
end
return nothing
end
# Build a scheduler with one pinger and return run/teardown closures.
function createbench(ctx)
pingers = [PingPonger(nothing, emptycore(ctx)) for i=1:1]
scheduler = Scheduler(ctx, pingers)
for pinger in pingers
deliver!(scheduler, addr(pinger), CreatePeer())
end
return (run! = () -> scheduler(; remote = false),
teardown! = () -> shutdown!(scheduler),
scheduler = scheduler)
end
function pingpongbench()
bench = createbench(ctx)
@time bench.run!()
bench.teardown!()
end
const ctx = CircoContext(;profile=CircoCore.Profiles.DefaultProfile())
# NOTE(review): the Main.Atom.* imports below only resolve inside a Juno/Atom
# editor session; this is interactive scratch code, not a runnable script.
using Main.Atom.Profiler
using Profile
using Main.Atom.Profiler.FlameGraphs
using AbstractTrees
AbstractTrees.children(d::Dict{Symbol,Any}) = d[:children] # To make the json dict iterable
pingers = [PingPonger(nothing, emptycore(ctx)) for i=1:1]
scheduler = Scheduler(ctx, pingers)
for pinger in pingers
deliver!(scheduler, addr(pinger), CreatePeer())
end
#run profiler here.........
@profiler scheduler(; remote = false)
data = Profile.fetch()
@info "Analyzing"
graph = FlameGraphs.flamegraph(data)
#Main.Atom.Profiler.pruneinternal!(graph)
#Main.Atom.Profiler.prunetask!(graph)
js = Main.Atom.Profiler.tojson(graph)
# Count flame-graph nodes flagged as dynamic dispatch, then list each with its
# sample count and source location.
# FIX: restored the `∈` membership operator (mangled by an encoding error) and
# dropped a useless `d =` binding inside the count closure.
dynamic_dispatches = count(PostOrderDFS(js)) do l
"dynamic-dispatch" ∈ l[:classes]
end
@show dynamic_dispatches
for l in PostOrderDFS(js)
if "dynamic-dispatch" ∈ l[:classes]
println("$(l[:count]) - $(l[:location]):$(l[:line])")
end
end
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 403 | using Documenter, CircoCore
# Documenter.jl build + deploy configuration for the CircoCore docs site.
makedocs(;
modules=[CircoCore],
format=Documenter.HTML(
assets = ["assets/favicon.ico"]
),
pages=[
"index.md",
"reference.md",
],
repo="https://github.com/Circo-dev/CircoCore/blob/{commit}{path}#L{line}",
sitename="CircoCore",
authors="KrisztiΓ‘n Schaffer",
)
# Push built docs to gh-pages (runs on CI).
deploydocs(;
repo="github.com/Circo-dev/CircoCore",
)
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 7835 | # SPDX-License-Identifier: MPL-2.0
module CircoCore
export CircoContext, Scheduler, AbstractScheduler, run!, pause!,
Actor, ActorId, schedule!,
emptycore,
# Lifecycle
OnSpawn, OnDeath, OnBecome,
#Plugins reexport
Plugin, setup!, shutdown!, symbol,
#Plugins
plugin, getactorbyid, unschedule!,
ActivityService,
# Messaging
PostCode, postcode, PostOffice, PostException, postoffice, Addr, addr, box, port, AbstractMsg, Msg,
redirect, body, target, sender, nulladdr,
Token, TokenId, Tokenized, token, Request, Response, Failure, Timeout, TimerTimeout, settimeout,
# Actor API
send, bulksend,
spawn, become, die,
migrate,
getname, registername, NameQuery, NameResponse,
# Events
EventSource, Event, Subscribe, UnSubscribe, fire,
# Signals
SigTerm,
# Space
Pos, pos, nullpos, Infoton, Space
import Base: show, string
import Plugins
const Plugin = Plugins.Plugin
const shutdown! = Plugins.shutdown!
const setup! = Plugins.setup!
const symbol = Plugins.symbol
const hooks = Plugins.hooks
include("hooks.jl")
abstract type AbstractContext end
"""
ActorId
A cluster-unique id that is randomly generated when the actor is spawned (first scheduled).
`ActorId` is an alias to `UInt64` at the time, so it may pop up in error messages as such.
"""
ActorId = UInt64
"""
abstract type Actor{TCoreState}
Supertype of all actors.
Subtypes must be mutable and must provide a field `core::TCoreState`
that can remain undefined after creation.
# Examples
```julia
mutable struct DataHolder{TValue, TCore} <: Actor{TCore}
value::TValue
core::TCore
end
```
"""
abstract type Actor{TCoreState} end
coretype(::Actor{TCore}) where TCore = TCore
abstract type AbstractAddr end
postcode(address::AbstractAddr) = address.postcode
postcode(actor::Actor) = postcode(addr(actor))
box(address::AbstractAddr) = address.box
"""
PostCode
A string that identifies a scheduler.
# Examples
"192.168.1.11:24721"
"""
PostCode = String # TODO (perf): destructured storage
# Numeric port: everything after the last ':' of the postcode.
port(postcode::PostCode) = parse(UInt32, split(postcode, ":")[end])
# NOTE(review): returns the host part INCLUDING the trailing ':' (SubString up to
# the colon's index), and throws if the postcode contains no ':' at all
# (`findfirst` returns nothing) — confirm both behaviors are intended.
network_host(postcode::PostCode) = SubString(postcode, 1, findfirst(":", postcode)[1])
# Fallback postcode for entities with no real address; see postcode(::Any) below.
invalidpostcode = "0.0.0.0:0"
postcode(::Any) = invalidpostcode
"""
Addr(postcode::PostCode, box::ActorId)
Addr(readable_address::String)
Addr()
The full address of an actor.
When created without arguments, it will be the null address. See [`isnulladdr()`](@ref)
If the referenced actor migrates to a different scheduler, messages sent to the
old address will bounce back as [`RecipientMoved`](@ref) and the Addr
must be updated manually.
# Examples
Addr("192.168.1.11:24721", 0xbc6ac81fc7e4ea2)
Addr("192.168.1.11:24721/bc6ac81fc7e4ea2")
"""
struct Addr <: AbstractAddr
postcode::PostCode
box::ActorId
end
nulladdr = Addr("", UInt64(0))
Addr() = nulladdr
Addr(box::ActorId) = Addr("", box)
Addr(readable_address::String) = begin
parts = split(readable_address, "/") # Handles only dns.or.ip:port[/actorid]
actorid = length(parts) == 2 ? parse(ActorId, parts[2], base=16) : 0
return Addr(parts[1], actorid)
end
string(a::Addr) = "$(a.postcode)/$(string(a.box, base=16))"
Base.convert(::Type{Addr}, x::Actor) = addr(x)
"""
isnulladdr(a::Addr)
Check if the given address is a null address, meaning that it points to "nowhere", messages
sent to it will be dropped.
"""
isnulladdr(a::Addr) = a == nulladdr
"""
box(a::Addr)::ActorId
Return the box of the address, that is the id of the actor.
When the actor migrates, its box remains the same, only the PostCode of the address changes.
"""
box(a)::ActorId = a.box
"""
isbaseaddress(addr::Addr)::Bool
Return true if `addr` is a base address, meaning it references a scheduler directly.
"""
isbaseaddress(addr::Addr) = box(addr) == 0
function Base.show(io::IO, a::Addr)
print(io, string(a))
end
"""
redirect(addr::Addr, topostcode::PostCode):Addr
Create a new Addr by replacing the postcode of the given one.
"""
redirect(addr::Addr, topostcode::PostCode) = Addr(topostcode, box(addr))
"""
addr(a::Actor)
Return the address of the actor.
Call this on a spawned actor to get its address. Throws `UndefRefError` if the actor is not spawned.
"""
addr(a::Actor) = a.core.addr::Addr
"""
addr(entity)
Return the address of entity.
The default implementation returns the `addr` field, allowing you to use your own structs
with such fields as message targets.
"""
addr(a) = a.addr
"""
box(a::Actor)
Return the 'P.O. box' of the spawned actor.
Call this on a spawned actor to get its id (aka box). Throws `UndefRefError` if the actor is not spawned.
"""
box(a::Actor) = box(addr(a))::ActorId
# Actor lifecycle messages
"""
OnSpawn
Actor lifecycle message that marks the first scheduling of the actor,
sent during spawning, before any other message.
# Examples
```julia
CircoCore.onmessage(me::MyActor, ::OnSpawn, service) = begin
registername(service, "MyActor", me) # Register this actor in the local name service
end
```
"""
struct OnSpawn end
"""
onmessage(me::Actor, message, service)
Actor callback to handle a message arriving at an actor.
Only the payload of the message is delivered, there is currently no way to access the infoton or the sender address.
If you need a reply, include the sender address in the request.
Note: Do not forget to import it or use its qualified name to allow overloading!
# Examples
```julia
import CircoCore.onmessage
struct TestRequest
replyto::Addr
end
struct TestResponse end
function onmessage(me::MyActor, message::TestRequest, service)
send(service, me, message.replyto, TestResponse())
end
```
"""
function onmessage(me::Actor, message, service) end
"""
OnDeath
Actor lifecycle message to release resources when the actor dies (meaning unscheduled "permanently").
The actor is still scheduled when this message is delivered,
but no more messages will be delivered after this.
"""
struct OnDeath end
"""
OnBecome(reincarnation::Actor)
Actor lifecycle message marking the `become()` action.
`reincarnation` points to the new incarnation of the actor.
`me` is scheduled at the delivery time of this message, `reincarnation` is not.
Exceptions thrown while handling `OnBecome`` will propagate to the initiating `become` call.
"""
struct OnBecome
reincarnation::Actor
end
# scheduler
abstract type AbstractScheduler{TMsg, TCoreState} end
addr(scheduler::AbstractScheduler) = Addr(postcode(scheduler), 0)
abstract type Delivery <: Plugin end
Plugins.symbol(::Delivery) = :delivery
const PORT_RANGE = 24721:24999
abstract type PostOffice <: Plugin end
Plugins.symbol(::PostOffice) = :postoffice
postcode(post::PostOffice) = post.postcode
addr(post::PostOffice) = Addr(postcode(post), 0)
struct PostException
message::String
end
abstract type LocalRegistry <: Plugin end
Plugins.symbol(::LocalRegistry) = :registry
abstract type SparseActivity <: Plugin end
Plugins.symbol(::SparseActivity) = :sparseactivity
abstract type Space <: Plugin end
abstract type EuclideanSpace <: Space end
Plugins.symbol(::Space) = :space
abstract type Positioner <: Plugin end
Plugins.symbol(::Positioner) = :positioner
# naming
function registername end
function getname end
include("actorstore.jl")
include("msg.jl")
include("onmessage.jl")
include("signal.jl")
include("zmq_postoffice.jl")
#include("udp_postoffice.jl")
include("token.jl")
include("registry.jl")
include("service.jl")
include("sparse_activity.jl")
include("space.jl")
include("positioning.jl")
include("profiles.jl")
include("context.jl")
include("scheduler.jl")
include("event.jl")
include("classic.jl")
function __init__()
Plugins.register(EuclideanSpaceImpl)
Plugins.register(OnMessageImpl)
end
end # module
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 1175 | struct StoreKey
key::ActorId
end
Base.hash(a::StoreKey, h::UInt) = xor(a.key, h)
Base.:(==)(a::StoreKey, b::StoreKey) = a.key == b.key
# Actor container keyed by raw ActorId, stored under StoreKey so lookups use
# the cheap identity-xor hash defined above.
struct ActorStore{T}
    cache::Dict{StoreKey, T}
    # Accepts id => actor pairs; every key is wrapped into a StoreKey.
    function ActorStore{T}(args...) where T
        entries = [StoreKey(kv.first) => kv.second for kv in args]
        return new{T}(Dict(entries...))
    end
end
ActorStore(args...) = ActorStore{Any}(args...)
# Dict-like API: callers index by raw ActorId, the StoreKey wrapping is internal.
Base.getindex(s::ActorStore, key::ActorId) = s.cache[StoreKey(key)]
Base.setindex!(s::ActorStore, actor, key::ActorId) = s.cache[StoreKey(key)] = actor
Base.haskey(s::ActorStore, key::ActorId) = haskey(s.cache,StoreKey(key))
Base.get(s::ActorStore, key::ActorId, default) = get(s.cache, StoreKey(key), default)
Base.get(f::Function, s::ActorStore, key) = get(f, s.cache, StoreKey(key))
Base.delete!(s::ActorStore, key::ActorId) = delete!(s.cache, StoreKey(key))
Base.pop!(s::ActorStore, key::ActorId, default) = pop!(s.cache, StoreKey(key), default)
Base.length(s::ActorStore) = length(s.cache)
# Iteration yields ActorId => actor pairs, unwrapping the internal StoreKey.
function Base.iterate(s::ActorStore, state...)
    step = iterate(s.cache, state...)
    step === nothing && return nothing
    entry, nextstate = step
    return (Pair(entry.first.key, entry.second), nextstate)
end
Base.values(s::ActorStore) = values(s.cache)
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 1796 | import ActorInterfaces
# Bridge between CircoCore and the ActorInterfaces.Classic API.

# Context handed to Classic behaviors: bundles the hosting actor and the service.
struct CircoCtx{TActor, TService, TCore}
    actor::TActor
    service::TService
    CircoCtx(actor::Actor{TCore}, service) where TCore = new{typeof(actor), typeof(service), TCore}(actor, service)
end

# Wrapper actor hosting a Classic behavior (`state`) inside a CircoCore actor.
mutable struct ClassicActor{TState, TCore} <: Actor{TCore}
    state::TState
    core::TCore
end

# Classic-facing address wrapping a CircoCore Addr.
struct CircoAddr <: ActorInterfaces.Classic.Addr
    addr::Addr
end
import Base.convert
function Base.convert(::Type{Addr}, a::CircoAddr)
    return a.addr
end
function Base.convert(::Type{ActorInterfaces.Classic.Addr}, a::Addr)
    return CircoAddr(a)
end

# NOTE(review): appears unused in this chunk — presumably a placeholder for message wrapping.
struct CircoMessage
    addr::Addr
end

# Forward normal messages to the wrapped Classic behavior.
function onmessage(me::ClassicActor, msg, service)
    ActorInterfaces.Classic.onmessage(me.state, msg, CircoCtx(me, service))
end
# CircoCore lifecycle messages have no Classic counterpart; swallow them.
function onmessage(me::ClassicActor, msg::Union{OnSpawn, OnDeath, OnBecome}, service)
    @debug "Lifecycle events are not delivered to classic" # TODO add lifecycle to classic
end

# Classic API implementation, delegating to the CircoCore service.
function ActorInterfaces.Classic.self(ctx::CircoCtx)::CircoAddr
    return CircoAddr(addr(ctx.actor))
end
function ActorInterfaces.Classic.send(recipient::CircoAddr, msg, ctx::CircoCtx)
    send(ctx.service, ctx.actor, recipient.addr, msg)
end
function ActorInterfaces.Classic.spawn(behavior, ctx::CircoCtx)::CircoAddr
    spawned = spawn(ctx.service, ClassicActor(behavior, emptycore(ctx.service)))
    return CircoAddr(spawned)
end
function ActorInterfaces.Classic.become(behavior, ctx::CircoCtx)
    CircoCore.become(ctx.service, ctx.actor, ClassicActor(behavior, ctx.actor.core))
    return nothing
end

# Spawning a plain (non-Actor) behavior on a scheduler wraps it into a ClassicActor.
spawn(scheduler::AbstractScheduler, actor) = spawn(scheduler, ClassicActor(actor, emptycore(scheduler.service)))
# Unwrap Classic addresses for external sends.
function send(scheduler::AbstractScheduler, to::CircoAddr, msgbody; kwargs...)
    send(scheduler, to.addr, msgbody; kwargs...)
end
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 2082 | # SPDX-License-Identifier: MPL-2.0
"""
    CircoContext(;options...) <: AbstractContext

Store configuration, manage staging and run-time code optimizations for Circo.

Instantiates the plugin stack from the selected profile, generates the
plugin-assembled `CoreState` and `Msg` types, and runs the `prepare` lifecycle hook.
"""
struct CircoContext <: AbstractContext
    userpluginsfn::Union{Nothing, Function} # zero-arg fn returning extra user plugins
    profile::Profiles.AbstractProfile       # selects the set of core plugins
    plugins::Plugins.PluginStack
    options
    corestate_type::Type # plugin-assembled actor core state type
    msg_type::Type       # plugin-assembled message envelope type
end
function CircoContext(; options...)
    target_module = get(options, :target_module, Main)
    # Use :profilefn if provided, :profile otherwise
    # directprofget() builds the fallback for :profilefn: a function that returns the
    # :profile option if given, else a freshly constructed DefaultProfile.
    directprofget = (;opts...) -> ((;opts...) -> get(() -> Profiles.DefaultProfile(;opts...), opts, :profile))
    profilefn = get(directprofget, options, :profilefn)
    profile = profilefn(;options...)
    # Default: no user plugins.
    userpluginsfn = get(() -> (() -> []), options, :userpluginsfn)
    plugins = instantiate_plugins(profile, userpluginsfn)
    # Assemble the context-specific CoreState/Msg types into target_module.
    types = generate_types(plugins; target_module)
    ctx = CircoContext(userpluginsfn, profile, plugins, options, types...)
    call_lifecycle_hook(ctx, prepare_hook)
    return ctx
end
# Compact human-readable REPL display for a context.
function Base.show(io::IO, ::MIME"text/plain", ctx::CircoContext)
    print(io, "CircoContext with $(length(ctx.plugins)) plugins")
end
# Build the plugin stack: user plugins first (they get precedence), then the
# profile's core plugins, wired to the scheduler hook list.
function instantiate_plugins(profile, userpluginsfn)
    userplugins = userpluginsfn()
    if !(userplugins isa AbstractArray) && !(userplugins isa Tuple)
        error("The userpluginsfn option of CircoContext should return a tuple or an array.")
    end
    allplugins = [userplugins..., Profiles.core_plugins(profile)...]
    return Plugins.PluginStack(allplugins, scheduler_hooks; profile.options...)
end
# Re-instantiate the stack from a context (e.g. one fresh stack per scheduler).
function instantiate_plugins(ctx::AbstractContext)
    return instantiate_plugins(ctx.profile, ctx.userpluginsfn)
end
# Ask Plugins.jl to assemble the concrete CoreState and Msg types from the
# field contributions of every plugin in the stack.
function generate_types(pluginstack::Plugins.PluginStack; target_module=Main)
    return (
        corestate_type = Plugins.customtype(pluginstack, :CoreState, AbstractCoreState, Symbol[], target_module),
        msg_type = Plugins.customtype(pluginstack, :Msg, AbstractMsg, [:TBody], target_module),
    )
end
# A default-constructed core state, used when spawning actors.
emptycore(ctx::AbstractContext) = ctx.corestate_type()
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 3991 | # SPDX-License-Identifier: MPL-2.0
# Event type hierarchy. RecurringEvent is expected to repeat; OneShotEvent fires once.
abstract type Event end
abstract type OneShotEvent <: Event end
abstract type RecurringEvent <: Event end
const RecurrentEvent = RecurringEvent # legacy alias

"""
    Subscribe(eventtype::Type, subscriber::Union{Actor, Addr}, filter::Union{Nothing, String, Function} = nothing)

Message for subscribing to events of the given `eventtype`.

The subscription can be optionally filtered by a topic string or a predicate function.
Filtering and subscription management will be done by the event dispatcher, which is a separate actor.
`eventtype` must be concrete.

# Examples

```julia
fs = getname(service, "fs")
send(service, me, fs, Subscribe(FSEvent, me, "MODIFY"))
send(service, me, fs, Subscribe(FSEvent, me, event -> event.path == "test.txt"))
```
"""
struct Subscribe # TODO: <: Request + handle forwarding
    subscriber::Addr
    filter::Union{Nothing, String, Function} # nothing = no filtering; String = topic match; Function = predicate
    eventtype::Type
    Subscribe(eventtype:: Type, subscriber::Actor, filter=nothing) = new(addr(subscriber), filter, eventtype)
    Subscribe(eventtype:: Type, subscriber::Addr, filter=nothing) = new(subscriber, filter, eventtype)
end
"""
    UnSubscribe(subscriber::Addr, eventtype::Type)

Message for unsubscribing from events of the given `eventtype`.

Cancels all subscriptions of the given `subscriber` for the given `eventtype`.
"""
struct UnSubscribe
    subscriber::Addr
    eventtype::Type
end
"""
    EventSource

Trait for actors that can publish events.

Manages subscriptions and dispatches events.
You need to add a field `eventdispatcher::Addr` to your actor to use this trait.
"""
struct EventSource end

# Spawn the per-actor dispatcher and store its address in the actor.
function initdispatcher(me::Actor, service)
    @assert hasfield(typeof(me), :eventdispatcher) "Missing field 'eventdispatcher::Addr' in $(typeof(me))"
    # Only initialize once (field may be undefined or a null Addr).
    @assert !isdefined(me, :eventdispatcher) || me.eventdispatcher == Addr()
    me.eventdispatcher = spawn(service, EventDispatcher(emptycore(service)))
end

# (Un)subscribe requests are forwarded to the actor's dispatcher.
ontraitmessage(::EventSource, me::Actor, msg::Union{Subscribe, UnSubscribe}, service) = begin
    send(service, me, me.eventdispatcher, msg)
end
# The dispatcher is created when the event-source actor spawns.
ontraitmessage(::EventSource, me::Actor, msg::OnSpawn, service) = begin
    initdispatcher(me, service)
end

"""
    EventSourceDied

SigTerm cause for terminating event dispatchers.
"""
struct EventSourceDied end

# When the source dies, its dispatcher is terminated too.
ontraitmessage(::EventSource, me::Actor, msg::OnDeath, service) = begin
    send(service, me, me.eventdispatcher, SigTerm(EventSourceDied()))
end

"""
    fire(service, me::Actor, event::Event)

Fire an event on the actor to be delivered by the actor's eventdispatcher.

To fire an event, the actor must have a field `eventdispatcher::Addr`,
which will be filled automatically.
"""
function fire(service, me::Actor, event::TEvent) where TEvent <: Event
    send(service, me, me.eventdispatcher, event)
end

# Helper actor holding the subscription table of one event source.
mutable struct EventDispatcher{TCore} <: Actor{TCore}
    listeners::IdDict #{Type{<:Event}, Array{Subscribe}}
    core::TCore
end
EventDispatcher(core) = EventDispatcher(IdDict(), core)
# Record a subscription under its event type, creating the bucket lazily.
# (Idiom: get! with a typed empty vector replaces the haskey + Array{T}(undef, 0) dance.)
function onmessage(me::EventDispatcher, msg::Subscribe, service)
    subs = get!(() -> Subscribe[], me.listeners, msg.eventtype)
    push!(subs, msg) # TODO ack
end
# Remove every subscription of msg.subscriber for msg.eventtype.
function onmessage(me::EventDispatcher, msg::UnSubscribe, service)
    if !haskey(me.listeners, msg.eventtype)
        return
    end
    # BUG FIX: filter! KEEPS entries for which the predicate returns true. The
    # previous predicate matched the subscriptions to be removed, so it kept
    # exactly those and deleted everyone else's subscriptions. Negate it so the
    # matching subscriptions are dropped and all others survive.
    filter!(me.listeners[msg.eventtype]) do sub
        return !(sub.eventtype == msg.eventtype && sub.subscriber == msg.subscriber)
    end
    # TODO ack
end
# Fan an incoming event out to every matching subscriber.
# A subscription matches when it has no filter, when its String filter equals the
# event's `topic` field (if the event has one), or when its predicate returns true.
function onmessage(me::EventDispatcher, msg::Event, service)
    for subscription in get(me.listeners, typeof(msg), [])
        if subscription.filter isa Nothing ||
            (hasfield(typeof(msg), :topic) && subscription.filter isa String && msg.topic == subscription.filter) ||
            (subscription.filter isa Function && subscription.filter(msg) == true)
            send(service, me, subscription.subscriber, msg)
        end
    end
end
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 2696 | # SPDX-License-Identifier: MPL-2.0
# Lifecycle hooks
# Default (no-op) lifecycle hook implementations; plugins override the ones they need.
# Returning false means "not handled".
prepare(::Plugin, ::Any) = false
schedule_start(::Plugin, ::Any) = false
schedule_pause(::Plugin, ::Any) = false
schedule_continue(::Plugin, ::Any) = false
schedule_stop(::Plugin, ::Any) = false
stage(::Plugin, ::Any) = false
prepare_hook = Plugins.create_lifecyclehook(prepare) # For staging. Will be called only once per ctx, do not use the plugin instance!
schedule_start_hook = Plugins.create_lifecyclehook(schedule_start)
schedule_pause_hook = Plugins.create_lifecyclehook(schedule_pause)
schedule_continue_hook = Plugins.create_lifecyclehook(schedule_continue)
schedule_stop_hook = Plugins.create_lifecyclehook(schedule_stop)
stage_hook = Plugins.create_lifecyclehook(stage)

# Event hooks (function declarations only; plugins provide the methods)
function actor_activity_sparse16 end # An actor just received a message, called with 1/16 probability
function actor_activity_sparse256 end # An actor just received a message, called with 1/256 probability
function actor_spawning end # called when the actor is already spawned, but before delivering OnSpawn.
function actor_dying end # called when the actor will die, but before delivering OnDeath.
function actor_state_write end # A write to an actor state will be applied (transaction commit)
function idle end # called irregularly while the message queue is empty.
function letin_remote end # Let external sources push messages into the queue (using deliver!).
function localdelivery end # deliver a message to an actor (e.g. call onmessage)
function localroutes end # Handle messages that are targeted to actors not (currently) scheduled locally (e.g. during migration).
function remoteroutes end # Deliver messages to external targets
function spawnpos end # Provide initial position of an actor when it is spawned
function specialmsg end # Handle messages that are targeted to the scheduler (to the box 0)
# The ordered hook list wired into every scheduler's PluginStack.
scheduler_hooks = [remoteroutes, localdelivery, actor_spawning, actor_dying,
    actor_state_write,
    localroutes, specialmsg, letin_remote,
    actor_activity_sparse16, actor_activity_sparse256, idle, spawnpos]

# Plugin-assembled types (concrete subtypes are generated per context by generate_types)
abstract type AbstractCoreState end
abstract type AbstractMsg{TBody} end # TODO rename to AbstractEnvelope
# Helpers
# Run a lifecycle hook over the target's plugin stack, logging (but not
# rethrowing) every per-plugin exception found in the result set.
function call_lifecycle_hook(target, lfhook, args...)
    res = lfhook(target.plugins, target, args...)
    res.allok && return nothing
    for (i, result) in enumerate(res.results)
        result isa Tuple && result[1] isa Exception || continue
        # Strip the conventional "_hook" suffix for a friendlier log message.
        hookname = string(lfhook)
        if endswith(hookname, "_hook")
            hookname = hookname[1:end-5]
        end
        @error "Error in calling '$hookname' lifecycle hook of plugin $(typeof(target.plugins[i])):" result
    end
    return nothing
end
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 987 | struct MsgTemplate <: Plugins.TemplateStyle end
Plugins.TemplateStyle(::Type{AbstractMsg}) = MsgTemplate()
Plugins.typedef(::MsgTemplate, spec) = quote
struct TYPE_NAME{TBody} <: CircoCore.AbstractMsg{TBody}
sender::CircoCore.Addr
target::CircoCore.Addr
body::TBody
$(Plugins.structfields(spec))
end;
TYPE_NAME(sender::CircoCore.Addr, target, body, args...; kwargs...) =
TYPE_NAME{typeof(body)}(sender, target, body, $(msgfieldcalls(spec)...))
TYPE_NAME(sender::CircoCore.Actor, target, body, args...; kwargs...) =
TYPE_NAME{typeof(body)}(CircoCore.addr(sender), target, body, $(msgfieldcalls(spec)...))
TYPE_NAME
end
msgfieldcalls(spec) = map(field -> :($(field.constructor)(sender, target, body, args...; kwargs...)), spec.fields)
sender(m::AbstractMsg) = m.sender::Addr
target(m::AbstractMsg) = m.target::Addr
body(m::AbstractMsg) = m.body
redirect(m::AbstractMsg, to::Addr) = (typeof(m))(target(m), to, body(m))
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 1767 | # SPDX-License-Identifier: MPL-2.0
# Default Delivery plugin: dispatches message bodies to onmessage()/ontraitmessage().
abstract type OnMessage <: Delivery end
mutable struct OnMessageImpl <: OnMessage
    OnMessageImpl(;options...) = new()
end
# localdelivery hook: traits get the message first, then the actor itself.
# Returns false so later plugins in the hook chain also run.
@inline localdelivery(::OnMessage, scheduler, msg, targetactor) = begin
    _body = body(msg)
    _service = scheduler.service
    for trait in traits(typeof(targetactor))
        # Traits declared as types are instantiated with no arguments.
        ontraitmessage(trait isa Type ? trait() : trait, targetactor, _body, _service)
    end
    onmessage(targetactor, _body, _service)
    return false
end
"""
    traits(::Type{<:Actor}) = ()

You can declare the traits of an actor by defining a method of this function.

Traits can handle messages in the name of the actor,
helping to compose the behavior of the actor (See [`ontraitmessage()`](@ref).).
E.g.: The EventSource trait handles the Subscribe and UnSubscribe messages automatically (among others).

Anything can be a trait, but we recommend to use immutable structs.

Return a tuple of traits, either instantiated or not.
Instantiated traits can hold values, while
traits given as types will be instantiated without arguments.

Important: Traits _cannot_ hold state.
If a trait needs to store state in the actor you have to add fields to the actor manually.

# Examples

struct DumpFieldTrait # Dumps a single field of the actor to stdout when the actor is dying.
    fieldname::Symbol
end

CircoCore.ontraitmessage(trait::DumpFieldTrait, me, ::OnDeath, service) = begin
    println("\$(trait.fieldname): \$(getfield(me, trait.fieldname))")
end

mutable struct MyActor <: Actor{Any}
    a
    b
    core
    MyActor() = new(rand(Int8), rand(Int8))
end

CircoCore.traits(::Type{MyActor}) = (DumpFieldTrait(:a), DumpFieldTrait(:b))
"""
traits(::Type{<:Actor}) = ()
# Default: traits ignore messages they have no method for.
ontraitmessage(trait, me, msg, service) = nothing
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 2055 | module Positioning
using Random
using Plugins
using ..CircoCore
const HOST_VIEW_SIZE = 1000 # TODO eliminate
# Default Positioner plugin: places schedulers on a host grid and actors
# randomly around their scheduler's center.
mutable struct SimplePositioner <: CircoCore.Positioner
    isroot::Bool
    hostid::UInt64 # TODO: eliminate non-core notions
    center::Pos
    # hostid and center stay undefined until Plugins.setup! runs.
    SimplePositioner(;options...) = new(
        length(get(options, :roots, [])) == 0 # TODO eliminate dirtiness
    )
end
__init__() = Plugins.register(SimplePositioner)
# Uniform random position inside the cube [-HOST_VIEW_SIZE/2, HOST_VIEW_SIZE/2)^3.
function randpos(rng = Random.GLOBAL_RNG)
    halfview = HOST_VIEW_SIZE / 2
    coord() = rand(rng, Float32) * HOST_VIEW_SIZE - halfview
    return Pos(coord(), coord(), coord())
end
# All grid points of the cartesian product of the given coordinate ranges, as a flat Pos vector.
flat_gridpoints(grids) = Pos.(vec(collect(Iterators.product(grids...))))
# idx-th point of a growing cubic grid around the origin (1-based).
function gridpos(idx)
    @assert idx < 17300 # This method may fail to generate unique values for higher idxes (and it is slow anyway).
    edge_length = floor(idx^(1/3)) / 2 + 1
    edge = -edge_length:edge_length
    points = sort(flat_gridpoints((edge, edge, edge)))
    return points[idx]
end
# Scheduler position relative to its host, derived from the post office port.
# 24721 is the first port of CircoCore.PORT_RANGE.
function hostrelative_schedulerpos(positioner, postcode)
    # return randpos()
    p = port(postcode)
    return gridpos(p - 24721 + 1) * HOST_VIEW_SIZE
end
# Host center: the root host sits at the origin, others at a random,
# hostid-seeded (thus reproducible) position.
function hostpos(positioner, postcode)
    if positioner.isroot
        return Pos(0, 0, 0)
    else
        rng = MersenneTwister(positioner.hostid)
        return randpos(rng) * 5.0
    end
end
# Compute and store the scheduler position once the plugin stack is assembled.
function Plugins.setup!(p::SimplePositioner, scheduler)
    postoffice = get(scheduler.plugins, :postoffice, nothing)
    host = get(scheduler.plugins, :host, nothing)
    p.hostid = isnothing(host) ? 0 : host.hostid
    if isnothing(postoffice)
        # No networking: no meaningful host grid, place the scheduler randomly.
        p.center = nullpos
        scheduler.pos = randpos()
    else
        p.center = hostpos(p, postcode(postoffice))
        scheduler.pos = p.center + hostrelative_schedulerpos(p, postcode(postoffice))
    end
    return nothing
end
# spawnpos hook: new actors start at a random offset around the scheduler center.
function CircoCore.spawnpos(p::SimplePositioner, scheduler, actor, result::Ref{Pos})
    result[] = randpos() + p.center
    return true
end
end
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 1067 | module Profiles
import ..CircoCore
# A profile names a set of core plugins. Larger profiles build on smaller ones:
# Empty ⊂ Minimal ⊂ Tiny ⊂ Default.
abstract type AbstractProfile end
function core_plugins end

# No core plugins at all.
struct EmptyProfile <: AbstractProfile
    options
    EmptyProfile(;options...) = new(options)
end
core_plugins(::EmptyProfile) = []

# Message delivery only.
struct MinimalProfile <: AbstractProfile
    options
    MinimalProfile(;options...) = new(options)
end
core_plugins(profile::MinimalProfile) = [CircoCore.OnMessage]

# Adds local registry, sparse activity sampling and space support.
struct TinyProfile <: AbstractProfile
    options
    TinyProfile(;options...) = new(options)
end
function core_plugins(profile::TinyProfile)
    options = profile.options
    return [
        CircoCore.LocalRegistry,
        CircoCore.SparseActivity,
        CircoCore.Space,
        core_plugins(MinimalProfile(;options...))...,
    ]
end

# Full default set: adds positioning and inter-node messaging.
struct DefaultProfile <: AbstractProfile
    options
    DefaultProfile(;options...) = new(options)
end
function core_plugins(profile::DefaultProfile)
    options = profile.options
    return [
        CircoCore.Positioner,
        CircoCore.PostOffice,
        core_plugins(TinyProfile(;options...))...,
    ]
end
end
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 2443 | # SPDX-License-Identifier: MPL-2.0
# Local, in-scheduler registry that can be queried from the outside
module Registry
using Plugins
using ..CircoCore
export NameQuery, NameResponse
"""
    NameQuery(name::String) <: Request

A query that can be sent to a remote scheduler for querying its local registry.
"""
struct NameQuery <: Request # TODO add respondto
    name::String
    token::Token
    NameQuery(name, token) = new(name, token)
    NameQuery(name) = new(name, Token())
end

# Answer to a NameQuery; `handler` is nothing when the name is not registered.
struct NameResponse <: Response
    query::NameQuery
    handler::Union{Addr, Nothing}
    token::Token
end

# Internal actor used by the registry plugin as the sender of NameResponses.
mutable struct RegistryHelper{TCore} <: Actor{TCore}
    registry::Any
    core::TCore
end

mutable struct LocalRegistryImpl <: CircoCore.LocalRegistry
    register::Dict{String, Addr}
    helperactor::RegistryHelper # filled in schedule_start
    LocalRegistryImpl(;options...) = new(Dict())
end
__init__() = Plugins.register(LocalRegistryImpl)

abstract type RegistryException end
struct RegisteredException <: RegistryException
    name::String
end
Base.show(io::IO, e::RegisteredException) = print(io, "name '", e.name, "' already registered")
# NOTE(review): NoRegistryException appears unthrown in this chunk — presumably used by callers.
struct NoRegistryException <: RegistryException
    msg::String
end

# Spawn the helper actor when the scheduler starts.
CircoCore.schedule_start(registry::LocalRegistryImpl, scheduler) = begin
    registry.helperactor = RegistryHelper(registry, emptycore(scheduler.service))
    spawn(scheduler, registry.helperactor)
end
# Register `name` -> `handler`. Registering on behalf of another actor
# (initiator != handler) is only allowed for names not yet taken; an actor
# (re)registering itself silently overwrites its previous entry.
function CircoCore.registername(registry::LocalRegistryImpl, name::String, handler::Addr, initiator::Union{Addr,Nothing}=nothing)
    if !isnothing(initiator) && initiator != handler
        if haskey(registry.register, name)
            throw(RegisteredException(name))
        end
        # BUG FIX: this log line referenced the undefined variable `initiator_actor`,
        # raising UndefVarError at runtime. `initiator` is already an Addr, log it directly.
        @info "Registering name $name to $handler by another actor: $initiator"
    end
    registry.register[name] = handler
    return true
end
# Look up a registered name; nothing when unregistered.
function CircoCore.getname(registry::LocalRegistryImpl, name::String)::Union{Addr, Nothing}
    get(registry.register, name, nothing)
end
# Fallback: the registry handles no other special (box 0) messages.
CircoCore.specialmsg(registry::LocalRegistryImpl, scheduler, message) = false
# Answer NameQuery messages sent to the scheduler (box 0) with a NameResponse
# originating from the helper actor.
CircoCore.specialmsg(registry::LocalRegistryImpl, scheduler, message::AbstractMsg{NameQuery}) = begin
    @debug "Registry specialmsg $message"
    send(scheduler.service,
        registry.helperactor,
        sender(message),
        NameResponse(body(message),
            getname(registry, body(message).name),
            body(message).token)
    )
    return true
end
end # module | CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 13038 | # SPDX-License-Identifier: MPL-2.0
using DataStructures
# Scheduler lifecycle states; values are spaced to allow later insertions.
@enum SchedulerState::Int8 created=0 running=10 paused=20 stopped=30
# Thrown on invalid state transitions (see setstate!).
struct StateChangeError <: Exception
    from::SchedulerState
    to::SchedulerState
end
# The single-threaded actor scheduler: owns the actor cache, the message queue
# and the plugin stack, and runs the event loop.
mutable struct Scheduler{THooks, TMsg, TCoreState} <: AbstractScheduler{TMsg, TCoreState} # TODO THooks -> TSchedulerState
    pos::Pos # TODO -> state
    postcode::PostCode # TODO -> state
    actorcount::UInt64 # number of actors currently scheduled here
    actorcache::ActorStore{Any} # box (ActorId) -> actor
    msgqueue::Deque{Any}# CircularBuffer{Msg}
    tokenservice::TokenService # request/response token timeouts
    state::SchedulerState # TODO state::TSchedulerState , plugin-assembled
    maintask::Union{Task, Nothing} # The task that runs the event loop
    lock::ReentrantLock # TODO -> state (?)
    startcond::Threads.Condition # TODO -> state; notified when the event loop started
    pausecond::Threads.Condition # TODO -> state; notified when the event loop paused/exited
    startup_actor_count::UInt16 # Number of actors created by plugins TODO eliminate
    plugins::Plugins.PluginStack
    hooks::THooks # TODO -> state; cached, type-stable hook callers
    zygote::AbstractArray # actors spawned automatically at first start
    exitflag::Bool # set to force the event loop to exit
    service::Service{Scheduler{THooks, TMsg, TCoreState}, TMsg, TCoreState}
    function Scheduler(
        ctx::AbstractContext,
        zygote::AbstractArray = [];
        pos = nullpos, # TODO: eliminate
        # msgqueue_capacity = 100_000
    )
        plugins = instantiate_plugins(ctx)
        _hooks = hooks(plugins)
        _lock = ReentrantLock()
        scheduler = new{typeof(_hooks), ctx.msg_type, ctx.corestate_type}(
            pos,
            invalidpostcode,
            0,
            ActorStore(),
            Deque{Any}(),#msgqueue_capacity),
            TokenService(),
            created,
            nothing,
            _lock,
            Threads.Condition(_lock), # startcond and pausecond share the scheduler lock
            Threads.Condition(_lock),
            0,
            plugins,
            _hooks,
            zygote,
            false)
        scheduler.service = Service(ctx, scheduler)
        call_lifecycle_hook(scheduler, setup!)
        postoffice = get(plugins, :postoffice, nothing) # TODO eliminate
        scheduler.postcode = isnothing(postoffice) ? invalidpostcode : postcode(postoffice) # TODO eliminate
        return scheduler
    end
end
Base.show(io::IO, ::Type{<:Scheduler}) = print(io, "Scheduler")
Base.show(io::IO, ::MIME"text/plain", scheduler::AbstractScheduler) = begin
    print(io, "Scheduler at $(postcode(scheduler)) with $(scheduler.actorcount) actors")
end
pos(scheduler::AbstractScheduler) = scheduler.pos # TODO find a better location
postcode(scheduler::AbstractScheduler) = scheduler.postcode # TODO find a better location
# Drive the scheduler state machine, firing the matching lifecycle hooks for
# every legal transition. Throws StateChangeError when no hook fired, i.e. the
# transition is not allowed from the current state. Returns the new state.
function setstate!(scheduler::AbstractScheduler, newstate::SchedulerState)
    callcount = 0 # counts fired hooks; zero at the end means an illegal transition
    callhook(hook) = begin
        call_lifecycle_hook(scheduler, hook)
        callcount += 1
    end
    curstate = scheduler.state
    curstate == newstate && return newstate # no-op transition
    if newstate == running
        if curstate == created || curstate == stopped
            # Cold start: start + continue hooks, then spawn the zygote actors.
            actorcount = scheduler.actorcount
            callhook(schedule_start_hook)
            callhook(schedule_continue_hook)
            scheduler.startup_actor_count = scheduler.actorcount - actorcount # TODO not just count and not here
            for a in scheduler.zygote; spawn(scheduler, a); end
        elseif curstate == paused
            callhook(schedule_continue_hook)
        end
    elseif newstate == paused
        if curstate == running
            callhook(schedule_pause_hook)
        end
    elseif newstate == stopped
        if curstate == running
            # Stopping from running implies pausing first.
            callhook(schedule_pause_hook)
            callhook(schedule_stop_hook)
        elseif curstate == paused || curstate == created
            callhook(schedule_stop_hook)
        end
    end
    callcount == 0 && throw(StateChangeError(scheduler.state, newstate))
    scheduler.state = newstate
    return newstate
end
# Run `op(cond)` while holding cond's lock; the lock is released even when op
# throws. Returns op's result.
function lockop(op::Function, cond)
    lock(cond)
    local result
    try
        result = op(cond)
    finally
        unlock(cond)
    end
    return result
end
# Same, but the lockable is looked up as a field of `obj`.
lockop(op::Function, obj, cond_sym::Symbol) = lockop(op, getfield(obj, cond_sym))
# Request a pause and block until the event loop acknowledges it.
function pause!(scheduler)
    setstate!(scheduler, paused)
    lockop(wait, scheduler, :pausecond)
    return nothing
end
# TODO collect startup info from a hook
logstart(scheduler::AbstractScheduler) = scheduler.state == created && @info "Circo scheduler starting on thread $(Threads.threadid())"
# Start the event loop on an async task. Unless nowait is set, block until the
# loop signalled startcond (i.e. it is actually running). Returns the task.
function run!(scheduler; nowait = false, kwargs...)
    isrunning(scheduler) && throw(StateChangeError(scheduler.state, running))
    logstart(scheduler)
    task = @async eventloop(scheduler; kwargs...)
    nowait && return task
    lockop(wait, scheduler, :startcond)
    return task
end
isrunning(scheduler) = scheduler.state == running

# For external calls # TODO find a better place
# Wrap msgbody into the context-specific envelope type and deliver it.
function send(scheduler::AbstractScheduler{TMsg, TCoreState}, from::Addr, to::Addr, msgbody; kwargs...) where {TMsg, TCoreState}
    msg = TMsg(from, to, msgbody, scheduler; kwargs...)
    deliver!(scheduler, msg)
end
# Sender defaults to the null address.
function send(scheduler::AbstractScheduler, to::Addr, msgbody; kwargs...)
    send(scheduler, Addr(), to, msgbody; kwargs...)
end
function send(scheduler::AbstractScheduler, to::Actor, msgbody; kwargs...)
    send(scheduler, addr(to), msgbody; kwargs...)
end
# Route a message: local targets go to the queue, remote ones to the
# remoteroutes hook chain (e.g. the post office).
@inline function deliver!(scheduler::AbstractScheduler, msg::AbstractMsg)
    # @debug "deliver! at $(postcode(scheduler)) $msg" # degrades ping-pong perf even if debugging is not enabled
    target_postcode = postcode(target(msg)) # TODO eliminate
    if postcode(scheduler) === target_postcode
        deliver_locally!(scheduler, msg)
        return nothing
    end
    if !scheduler.hooks.remoteroutes(scheduler, msg)
        @info "Unhandled remote delivery: $msg"
    end
    return nothing
end
@inline function deliver_locally!(scheduler::AbstractScheduler, msg::AbstractMsg)
    deliver_locally_kern!(scheduler, msg)
    return nothing
end
# Responses additionally cancel the timeout registered for their token.
@inline function deliver_locally!(scheduler::AbstractScheduler, msg::AbstractMsg{<:Response})
    cleartimeout(scheduler.tokenservice, token(msg.body))
    deliver_locally_kern!(scheduler, msg)
    return nothing
end
# Box 0 messages go to the specialmsg hook (the scheduler itself); everything
# else is queued for the event loop.
@inline function deliver_locally_kern!(scheduler::AbstractScheduler, msg::AbstractMsg)
    if box(target(msg)) == 0 # TODO (?) always push, check later only if target not found
        if !scheduler.hooks.specialmsg(scheduler, msg)
            @debug("Unhandled special message: $msg")
        end
    else
        push!(scheduler.msgqueue, msg)
    end
    return true
end
# Build a fresh core state; a first-time spawn gets a new random ActorId,
# a rescheduled actor keeps its box.
@inline function fill_corestate!(scheduler::AbstractScheduler{TMsg, TCoreState}, actor) where {TMsg, TCoreState}
    actorid = !isdefined(actor, :core) || box(actor) == 0 ? rand(ActorId) : box(actor)
    actor.core = TCoreState(scheduler, actor, actorid)
    return nothing
end
@inline is_scheduled(scheduler::AbstractScheduler, actor::Union{Actor, Addr}) = haskey(scheduler.actorcache, box(actor))

# Schedule an actor here. On first scheduling, fire the actor_spawning hook and
# deliver OnSpawn synchronously. Returns the actor's address.
function spawn(scheduler::AbstractScheduler{TMsg}, actor::Actor) where {TMsg}
    isfirstschedule = !isdefined(actor, :core) || box(actor) == 0
    if !isfirstschedule && is_scheduled(scheduler, actor)
        error("Actor already spawned")
    end
    fill_corestate!(scheduler, actor)
    schedule!(scheduler, actor)
    scheduler.actorcount += 1
    if isfirstschedule
        scheduler.hooks.actor_spawning(scheduler, actor)
        _immediate_delivery(actor, scheduler,
            TMsg(addr(scheduler), actor, OnSpawn(), scheduler)
        )
    end
    return addr(actor)
end
@inline function schedule!(scheduler::AbstractScheduler, actor::Actor)::Addr
    scheduler.actorcache[box(actor)] = actor
    return addr(actor)
end
# Kill an actor: fire actor_dying, deliver OnDeath synchronously (errors in the
# handler are logged, not propagated), then unschedule it.
function kill!(scheduler::AbstractScheduler{TMsg}, actor::Actor) where {TMsg}
    scheduler.hooks.actor_dying(scheduler, actor)
    try
        _immediate_delivery(actor, scheduler, TMsg(addr(scheduler), actor, OnDeath(), scheduler))
    catch e
        @warn "Exception in handling OnDeath of actor $(addr(actor)). Unscheduling anyway." exception = (e, catch_backtrace())
    end
    if is_scheduled(scheduler, actor)
        unschedule!(scheduler, actor)
        scheduler.actorcount -= 1
    else
        error("Actor wasn't scheduled!")
    end
end
@inline function unschedule!(scheduler::AbstractScheduler, actor::Actor)
    if is_scheduled(scheduler, actor)
        delete!(scheduler.actorcache, box(actor))
    end
    return nothing
end
# Process one queued message.
@inline function step!(scheduler::AbstractScheduler)
    msg = popfirst!(scheduler.msgqueue)
    _immediate_delivery(msg, scheduler)
    return nothing
end
@inline function _immediate_delivery(msg, scheduler)
    targetbox = target(msg).box::ActorId
    targetactor = get(scheduler.actorcache, targetbox, nothing)
    _immediate_delivery(targetactor, scheduler, msg)
end
# Deliver to a resolved actor; unknown targets go to the localroutes hook
# (e.g. migration forwarding).
@inline function _immediate_delivery(targetactor, scheduler, msg)
    if isnothing(targetactor)
        if !scheduler.hooks.localroutes(scheduler, msg)
            @debug "Cannot deliver on host: $msg"
        end
    else
        scheduler.hooks.localdelivery(scheduler, msg, targetactor)
    end
    return nothing
end
# Deliver every fired token timeout to its watcher. Returns true if any fired.
@inline function checktimeouts(scheduler::AbstractScheduler{TMsg, TCoreState}) where {TMsg, TCoreState}
    needchecktimeouts!(scheduler.tokenservice) || return false
    firedtimeouts = poptimeouts!(scheduler.tokenservice)
    if length(firedtimeouts) > 0
        @debug "Fired timeouts: $firedtimeouts"
        for timeout in firedtimeouts
            deliver_locally!(scheduler, TMsg(
                addr(scheduler),
                timeout.watcher,
                timeout,
                scheduler)
            )
        end
        return true
    end
    return false
end
# Sleep that tolerates spurious exceptions (EOFError happens when sleeping on a
# closing event loop) but still propagates InterruptException.
@inline function safe_sleep(sleeplength)
    try
        sleep(sleeplength)
    catch e # EOFError happens
        if e isa InterruptException
            # Idiom fix: bare rethrow() re-raises the in-flight exception with
            # its original backtrace; rethrow(e) would replace the backtrace.
            rethrow()
        else
            @info "Exception while sleeping: $e"
        end
    end
end
# Idle loop: let remote sources push messages in, fire timeouts, and sleep with
# a slowly growing backoff until there is local work again (or the scheduler
# stops running).
@inline function process_remote_and_timeout(scheduler::AbstractScheduler)
    incomingmsg = nothing
    hadtimeout = false
    sleeplength = 0.001
    enter_ts = time_ns()
    while true
        yield() # Allow plugin tasks to run
        scheduler.hooks.letin_remote(scheduler)
        hadtimeout = checktimeouts(scheduler)
        if haswork(scheduler) || !isrunning(scheduler)
            return nothing
        else
            scheduler.hooks.idle(scheduler)
            safe_sleep(sleeplength)
            # After ~1ms of idling, back off gradually up to 30ms per nap.
            if time_ns() - enter_ts > 1_000_000
                sleeplength = min(sleeplength * 1.002, 0.03)
            end
        end
    end
end
@inline function haswork(scheduler::AbstractScheduler)
    return !isempty(scheduler.msgqueue)
end
# With remote sources enabled the loop never runs out of work "for good".
@inline function nomorework(scheduler::AbstractScheduler, remote::Bool)
    return !haswork(scheduler) && !remote
end
# The scheduler event loop. Processes queued messages in batches of up to 255
# (to interleave timeout/remote handling), then waits for more work. Exits when
# paused/stopped, on exitflag, or - without remote sources - when idle.
function eventloop(scheduler::AbstractScheduler; remote = true)
    try
        if isnothing(scheduler.maintask)
            scheduler.maintask = current_task()
        end
        setstate!(scheduler, running)
        scheduler.exitflag = false
        lockop(notify, scheduler, :startcond) # unblock run!() waiters
        while true
            msg_batch = UInt8(255)
            while msg_batch != 0 && haswork(scheduler) && !scheduler.exitflag
                msg_batch -= UInt8(1)
                step!(scheduler)
            end
            if !isrunning(scheduler) || nomorework(scheduler, remote) || scheduler.exitflag
                @debug "Scheduler loop $(postcode(scheduler)) exiting."
                return
            end
            process_remote_and_timeout(scheduler)
        end
    catch e
        if e isa InterruptException
            @info "Interrupt to scheduler on thread $(Threads.threadid())"
        else
            @error "Error while scheduling on thread $(Threads.threadid())" exception = (e, catch_backtrace())
        end
    finally
        isrunning(scheduler) && setstate!(scheduler, paused)
        scheduler.exitflag = false
        lockop(notify, scheduler, :pausecond) # unblock pause!() waiters
        @assert !isnothing(scheduler.maintask)
        # Hand control back to the task that owns the loop when exiting from a
        # different task; otherwise just release ownership.
        if scheduler.maintask != current_task()
            yieldto(scheduler.maintask)
        else
            scheduler.maintask = nothing
        end
    end
end
# Run the event loop synchronously on the current task.
function (scheduler::AbstractScheduler)(;remote = true)
    logstart(scheduler)
    eventloop(scheduler; remote = remote)
end
# NOTE remote keyword signals that there may be remote connection to actors and shouldn't stop automatically. In this case ( remot = true) scheduling stop when the last actor die() function called with "exit = true" keyword
# Deliver the given message(s), then run the loop synchronously.
function (scheduler::AbstractScheduler)(msgs; remote = false)
    if msgs isa AbstractMsg
        msgs = [msgs]
    end
    for msg in msgs
        deliver!(scheduler, msg)
    end
    scheduler(;remote = remote)
end
# Stop the scheduler and let the plugins release their resources.
function shutdown!(scheduler::AbstractScheduler)
    setstate!(scheduler, stopped)
    call_lifecycle_hook(scheduler, shutdown!)
    @debug "Scheduler at $(postcode(scheduler)) exited."
end
# Helpers for plugins
getactorbyid(scheduler::AbstractScheduler, id::ActorId) = get(scheduler.actorcache, id, nothing)
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 7725 | # SPDX-License-Identifier: MPL-2.0
# The service is the actor-facing API object: lifecycle callbacks receive it and
# use it to send, spawn, etc. It wraps the scheduler plus a cached empty core state.
abstract type AbstractService{TScheduler, TMsg, TCore} end
struct Service{TScheduler, TMsg, TCore} <: AbstractService{TScheduler, TMsg, TCore}
    scheduler::TScheduler
    emptycore::TCore # prototype core state handed to newly spawned actors
end
Base.show(io::IO, ::Type{<:AbstractService}) = print(io, "Circo Service")
Service(ctx::AbstractContext, scheduler::AbstractScheduler) =
    Service{typeof(scheduler), ctx.msg_type, ctx.corestate_type}(scheduler, emptycore(ctx))
emptycore(s::AbstractService) = s.emptycore
emptycore(sdl::AbstractScheduler) = emptycore(sdl.service)
# Access a plugin instance by its registered symbol.
plugin(service::AbstractService, symbol::Symbol) = plugin(service.scheduler, symbol)
plugin(sdl::AbstractScheduler, symbol::Symbol) = sdl.plugins[symbol]
"""
send(service, sender::Actor, to::Addr, messagebody::Any; energy::Real = 1, timeout::Real = 2.0)
Send a message from an actor to an another.
Part of the actor API, can be called from a lifecycle callback, providing the `service` you got.
`messagebody` can be of any type, but a current limitation of inter-node communication is
that the serialized form of `messagebody` must fit in an IPv4 UDP packet with ~100 bytes margin.
The exact value depends on the MTU size of the network and changing implementation details, but 1380 bytes
can be considered safe. You may be able to tune your system to get higher values.
If `messagebody` is a `Request`, a timeout will be set for the token of it. The `timeout` keyword argument
can be used to control the deadline (seconds).
`energy` sets the energy and sign of the Infoton attached to the message (if the infoton optimizer is running).
# Examples
```julia
const QUERY = "The Ultimate Question of Life, The Universe, and Everything."
mutable struct MyActor <: Actor{TCoreState}
searcher::Addr
core::CoreState
MyActor() = new()
end
struct Start end
struct Search
query::String
end
[...] # Spawn the searcher or receive its address
function CircoCore.onmessage(me::MyActor, message::Start, service)
send(service,
me,
me.searcher,
Search(QUERY, addr(me)))
end
```
# Implementation
Please note that `service` is always the last argument of lifecycle callbacks
like `onmessage`.
It's because `onmessage` is dynamically dispatched, and `service` provides no
information about where to dispatch. (Only one service instance exists
as of `v"0.2.0"`) Listing it at the end improves performance.
On the other hand, actor API endpoints like `send` are always statically dispatched,
thus they can accept the service as their first argument, allowing the user to treat
e.g. "`spawn(service`" as a single unit of thought and not forget to write out the
ballast `service`.
Consistency is just as important as convenience. But performance is king.
"""
@inline function send(service::AbstractService{TScheduler, TMsg}, sender, to::Addr, messagebody; kwargs...) where {TScheduler, TMsg, TCore}
message = TMsg(sender, to, messagebody, service.scheduler; kwargs...)
deliver!(service.scheduler, message)
end
# Request variant of `send`: besides delivering the message, register a
# timeout for the request's token, so the sender gets notified (with the
# request as cause) if no matching Response arrives within `timeout` seconds.
@inline function send(service::AbstractService{TScheduler, TMsg}, sender, to::Addr, messagebody::Request; timeout = 2.0, kwargs...) where {TScheduler, TMsg}
    settimeout(service.scheduler.tokenservice, Timeout(sender, token(messagebody), timeout, messagebody))
    message = TMsg(sender, to, messagebody, service.scheduler; kwargs...)
    deliver!(service.scheduler, message)
end
# Convenience overload: accept any target that `addr` can resolve to an
# `Addr` (e.g. an Actor reference).
@inline function send(service::AbstractService, sender, to, messagebody; kwargs...)
    send(service, sender, addr(to), messagebody; kwargs...)
end
# Guard overload: a `nothing` target is a caller bug; log an error loudly
# instead of failing with a MethodError deeper in the stack.
@inline function send(service::AbstractService, sender, to::Nothing, messagebody; kwargs...)
    @error "Sending message to 'Nothing' is not possible!"
end
"""
    bulksend(service, sender::Actor, targets, messagebody; kwargs...)

Send a copy of `messagebody` from `sender` to every target in `targets`.
"""
@inline function bulksend(service::AbstractService, sender::Actor, targets, messagebody; kwargs...)
    foreach(target -> send(service, sender, target, messagebody; kwargs...), targets)
end
"""
spawn(service, actor::Actor, [pos::Pos])::Addr
Spawn the given actor on the scheduler represented by `service`, return the address of it.
Part of the actor API, can be called from a lifecycle callback, providing the `service` you got.
The `OnSpawn` message will be delivered to `actor` before this function returns.
# Examples
# TODO: update this sample
```
mutable struct ListItem{TData, TCore} <: Actor{TCore}
data::TData
next::Union{Nothing, Addr}
core::TCore
ListItem(data, core) = new{typeof(data), typeof(core)}(data, nothing, core)
end
struct Append{TData}
value::TData
end
function CircoCore.onmessage(me::ListItem, message::Append, service)
me.next = spawn(service, ListItem(message.value))
end
```
"""
@inline function spawn(service::AbstractService, actor::Actor)::Addr
return spawn(service.scheduler, actor)
end
"""
become(service, old::Actor, reincarnated::Actor)
Reincarnates the `old` actor into `new`, meaning that `old` will be unscheduled,
and `reincarnated` will be scheduled reusing the address of `old`.
The `onbecome` lifecycle callback will be called.
Note: As the name suggests, `become` is the Circonian way of behavior change.
"""
function become(service::AbstractService{TScheduler, TMsg}, old::Actor, reincarnated::Actor) where {TScheduler, TMsg}
scheduler = service.scheduler
_immediate_delivery(old, scheduler, TMsg(addr(scheduler), old, OnBecome(reincarnated), scheduler))
reincarnated.core = old.core
unschedule!(scheduler, old)
return spawn(service, reincarnated)
end
"""
die(service, me::Actor; exit=false)
Permanently unschedule the actor from its current scheduler.
if `exit` is true and this is the last actor on its scheduler,
the scheduler will be terminated.
"""
@inline function die(service::AbstractService, me::Actor; exit = false)
kill!(service.scheduler, me)
if exit
if service.scheduler.actorcount <= service.scheduler.startup_actor_count
service.scheduler.exitflag = true
@debug "Scheduler's exitflag raised"
end
end
end
"""
registername(service, name::String, actor::Union{Addr,Actor})
Register the given actor under the given name in the scheduler-local name registry.
Note that there is no need to unregister the name when migrating or dying
# TODO implement manual and auto-unregistration
"""
@inline function registername(service::AbstractService, name::String, actor::Addr)
registry = get(service.scheduler.plugins, :registry, nothing)
isnothing(registry) && throw(NoRegistryException("Cannot register name $name: Registry plugin not found"))
registername(registry, name, actor)
end
registername(service::AbstractService, name::String, actor::Actor) = registername(service, name, addr(actor))
registername(sdl::AbstractScheduler, name::String, actor_addr) = registername(sdl.service, name, actor_addr)
"""
function getname(service, name::String)::Union{Addr, Nothing}
Return the registered name from the scheduler-local registry, or nothing.
See also: [`NameQuery`](@ref)
"""
@inline function getname(service, name::String)::Union{Addr, Nothing}
registry = get(service.scheduler.plugins, :registry, nothing)
isnothing(registry) && throw(NoRegistryException("Cannot search for name $name: Registry plugin not found"))
return getname(registry, name)
end
@inline function settimeout(service::AbstractService, actor::Actor, timeout_secs::Real = 0.0)
return settimeout(service.scheduler.tokenservice, Timeout(actor, Token(), timeout_secs))
end
@inline function cleartimeout(service::AbstractService, token::Token)
return cleartimeout(service.scheduler.tokenservice, token)
end
@inline pos(service::AbstractService) = pos(service.scheduler) # TODO find its place
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 579 | abstract type Signal end
"""
SigTerm(cause=Nothing; exit=Nothing)
Signal to terminate an actor.
The default handler terminates the actor without delay.
"""
struct SigTerm <: Signal
cause
exit::Union{Nothing, Bool} # Whether the scheduler should exit when this was the last actor and no more work
SigTerm(cause=nothing; exit=nothing) = new(cause, exit)
end
onmessage(me::Actor, msg::SigTerm, service) = begin
@debug "$(box(me)): Dying on" msg
die(service, me; exit=isnothing(msg.exit) ? exitwhenlast(me) : msg.exit)
end
exitwhenlast(me::Actor) = true
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 2324 | # SPDX-License-Identifier: MPL-2.0
using LinearAlgebra
"""
pos(a::Actor)::Pos
return the current position of the actor.
Call this on a spawned actor to get its position. Throws `UndefRefError` if the actor is not spawned.
"""
pos(a::Actor) = a.core.pos
"""
Pos(x::Real, y::Real, z::Real)
Pos(coords)
A point in the 3D "actor space".
You can access the coords by pos.x, pos.y, pos.z.
"""
struct Pos <: AbstractVector{Float32}
coords::Tuple{Float32, Float32, Float32}
Pos(x, y, z) = new((x, y, z))
Pos(coords) = new(coords)
end
dist(a::Pos, b::Pos) = sqrt((a.coords[1]-b.coords[1])^2 + (a.coords[2]-b.coords[2])^2 + (a.coords[3]-b.coords[3])^2)
Base.isless(a::Pos, b::Pos) = norm(a) < norm(b)
Base.:*(a::Pos, x::Real) = Pos(a.coords[1] * x, a.coords[2] * x, a.coords[3] * x)
Base.:/(a::Pos, x::Real) = Pos(a.coords[1] / x, a.coords[2] / x, a.coords[3] / x)
Base.:+(a::Pos, b::Pos) = Pos(a.coords[1] + b.coords[1], a.coords[2] + b.coords[2], a.coords[3] + b.coords[3])
Base.:-(a::Pos, b::Pos) = Pos(a.coords[1] - b.coords[1], a.coords[2] - b.coords[2], a.coords[3] - b.coords[3])
Base.getindex(pos::Pos, i::Int) = getindex(pos.coords, i)
Base.getproperty(pos::Pos, symbol::Symbol) = (symbol == :x) ? getfield(pos, :coords)[1] :
(symbol == :y) ? getfield(pos, :coords)[2] :
(symbol == :z) ? getfield(pos, :coords)[3] :
getfield(pos, symbol)
Base.iterate(pos::Pos) = iterate(pos.coords)
Base.iterate(pos::Pos, state) = iterate(pos.coords, state)
Base.length(pos::Pos) = length(pos.coords)
Base.size(pos::Pos) = 3
Base.show(io::IO, ::MIME"text/plain", pos::Pos) = begin
print(io, "Pos($(pos[1]), $(pos[2]), $(pos[3]))")
end
nullpos = Pos(0, 0, 0)
struct EuclideanSpaceImpl <: EuclideanSpace # registered at CircoCore.__init__
    EuclideanSpaceImpl(;options...) = new()
end
# Default position for actors that have no position yet.
posinit() = nullpos
# Core-state field initializer for `pos`: keep a predefined position (e.g. on
# migration), otherwise let the scheduler's `spawnpos` hook fill `outpos`.
posinit(scheduler, actor, actorid) = begin
    if isdefined(actor, :core) && pos(actor) != nullpos
        return pos(actor) # Predefined pos or migration
    end
    outpos = Ref(nullpos)
    # NOTE(review): the hook's return value is unused; the hook appears to be
    # expected to write into `outpos` — verify against the hook implementations.
    actorpos = scheduler.hooks.spawnpos(scheduler, actor, outpos)
    return outpos[]
end
Plugins.customfield(::Space, ::Type{AbstractCoreState}) = Plugins.FieldSpec("pos", Pos, posinit)
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 754 | # SPDX-License-Identifier: MPL-2.0
module Activity

using Plugins
using ..CircoCore

# Counts down on every local message delivery and periodically fires the
# "sparse activity" hooks, providing cheap, probabilistic sampling of actor
# activity (on average roughly 1 in 16 deliveries fires sparse16, and
# roughly 1 in 256 fires sparse256).
mutable struct SparseActivityImpl <: CircoCore.SparseActivity
    counter::UInt
    SparseActivityImpl(;options...) = new(1)
end
__init__() = Plugins.register(SparseActivityImpl)
@inline function CircoCore.localdelivery(as::SparseActivityImpl, scheduler, msg, targetactor)
    if as.counter == 0
        scheduler.hooks.actor_activity_sparse16(scheduler, targetactor)
        # Re-arm with a random even value in 0..30 (uniform 0..31, rounded down to even).
        as.counter = rand(UInt8) >> 3
        if as.counter % 2 == 1
            as.counter -= 1
        end
        if as.counter < 2 # i.e. counter == 0: happens in ~1/16 of the sparse16 firings
            scheduler.hooks.actor_activity_sparse256(scheduler, targetactor)
        end
    else
        as.counter -= 1
    end
    return false # presumably: do not stop the hook chain — verify Plugins.jl convention
end
end # module
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 2449 | # SPDX-License-Identifier: MPL-2.0
import Base.isless

# How often (in seconds) pending timeouts are checked (see needchecktimeouts!).
const TIMEOUTCHECK_INTERVAL = 1.0

# BUGFIX/perf: was a non-const untyped global (`TokenId = UInt64`), which is
# type-unstable at every use site. `const` makes the alias binding stable.
const TokenId = UInt64

"""
    Token()
    Token(id::TokenId)

Identifier used to correlate Tokenized messages (e.g. a `Request` with its
`Response`). The zero-argument constructor draws a random id.
"""
struct Token
    id::TokenId
    Token() = new(rand(TokenId))
    Token(id::TokenId) = new(id)
end
"""
abstract type Tokenized end
Tokenized messages can be tracked automatically by the scheduler.
When an actor sends out a [`Request`](@ref), a timeout will be set up
to track the fulfillment of the request. When a `Response` with the same token
is received, the timeout will be cancelled. See also: [`send`](@ref).
"""
abstract type Tokenized end
token(t::Tokenized) = t.token
abstract type Request <: Tokenized end
abstract type Response <: Tokenized end
"""
abstract type Failure <: Response end
`Failure` is a type of `Response` to a `Request` that fails to fulfill it.
"""
abstract type Failure <: Response end
# Cause used for plain timer-based timeouts (settimeout without a request).
struct Timer
    timeout_secs::Float64
end
# A registered deadline: `watcher` is to be notified with `cause` when
# `deadline` (unix time, seconds) passes before the token is cleared.
struct Timeout{TCause}
    watcher::Addr
    token::Token
    deadline::Float64
    cause::TCause
end
Timeout(watcher::Actor, token::Token, timeout_secs, cause = Timer(timeout_secs)) = Timeout{typeof(cause)}(addr(watcher), token, Base.Libc.time() + timeout_secs, cause)
# Order timeouts by their deadline (earliest first).
Base.isless(a::Timeout, b::Timeout) = isless(a.deadline, b.deadline)
const TimerTimeout = Timeout{Timer}
# Tracks outstanding timeouts keyed by their token. TODO <: Plugin
mutable struct TokenService
    next_timeoutcheck_ts::Float64
    timeouts::Dict{Token, Timeout}
    TokenService() = new(Base.Libc.time() + TIMEOUTCHECK_INTERVAL, Dict())
end
# Register (or overwrite) the timeout for its token.
@inline function settimeout(tokenservice::TokenService, timeout::Timeout)
    tokenservice.timeouts[timeout.token] = timeout
end
# Remove the timeout for `token`; returns the removed entry or `nothing`.
@inline function cleartimeout(tokenservice::TokenService, token::Token)
    return pop!(tokenservice.timeouts, token, nothing)
end
# Rate-limit timeout scanning: returns true at most once per
# TIMEOUTCHECK_INTERVAL seconds, re-arming the next check timestamp.
@inline function needchecktimeouts!(tokenservice::TokenService)
    now_ts = Base.Libc.time()
    tokenservice.next_timeoutcheck_ts > now_ts && return false
    tokenservice.next_timeoutcheck_ts = now_ts + TIMEOUTCHECK_INTERVAL
    return true
end
# TODO optimize
# Remove and return every timeout whose deadline has passed.
@inline function poptimeouts!(tokenservice::TokenService, currenttime = Base.Libc.time())::Vector{Timeout}
    fired = Timeout[]
    for (tok, timeout) in tokenservice.timeouts
        timeout.deadline < currenttime && push!(fired, timeout)
    end
    for timeout in fired
        delete!(tokenservice.timeouts, timeout.token)
    end
    return fired
end
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 2964 | # SPDX-License-Identifier: MPL-2.0
module UDPPostOffice_

using Plugins
using ..CircoCore
import ..CircoCore: AbstractCoreState
using Serialization
using Sockets
using DataStructures

# Inter-node transport over UDP. Incoming datagrams are deserialized on a
# background task into `inqueue` and handed to the scheduler in letin_remote;
# outgoing messages are serialized and sent through `outsocket`.
mutable struct UDPPostOffice <: CircoCore.PostOffice
    outsocket::UDPSocket
    inqueue::Deque{Any}
    stopped::Bool
    postcode::PostCode # set later, in setup!
    socket::UDPSocket # the bound receive socket, set later, in setup!
    intask # background receiver task, set in schedule_start
    UDPPostOffice(;options...) = begin
        # Partial construction: postcode/socket/intask are filled in by the
        # lifecycle hooks below.
        return new(UDPSocket(), Deque{Any}(), false)
    end
end

__init__() = Plugins.register(UDPPostOffice)

# Core-state field initializer: an actor's Addr combines the node's postcode
# with the actor id.
addrinit() = nulladdr
addrinit(scheduler, actor, actorid) = Addr(postcode(scheduler.plugins[:postoffice]), actorid)
Plugins.customfield(::PostOffice, ::Type{AbstractCoreState}) = Plugins.FieldSpec("addr", Addr, addrinit)

# Bind a UDP socket on the first free port of PORT_RANGE at the local ip.
function allocate_postcode()
    socket = UDPSocket()
    ipaddr = Sockets.getipaddr()
    for port in CircoCore.PORT_RANGE
        postcode = "$(ipaddr):$port"
        bound = bind(socket, ipaddr, port)
        bound || continue # port in use, try the next one
        @debug "Bound to $postcode"
        return postcode, socket
    end
    throw(PostException("No available port found for a Post Office"))
end

CircoCore.setup!(post::UDPPostOffice, scheduler) = begin
    postcode, socket = allocate_postcode()
    post.postcode = postcode
    post.socket = socket
end

CircoCore.schedule_start(post::UDPPostOffice, scheduler) = begin
    post.intask = @async arrivals(post) # TODO errors throwed here are not logged
end

CircoCore.schedule_stop(post::UDPPostOffice, scheduler) = begin
    post.stopped = true
    yield()
end

CircoCore.shutdown!(post::UDPPostOffice, scheduler) = close(post.socket)

# Move at most 30 queued remote messages into the scheduler per call.
@inline CircoCore.letin_remote(post::UDPPostOffice, scheduler::AbstractScheduler)::Bool = begin
    for i = 1:min(length(post.inqueue), 30)
        CircoCore.deliver!(scheduler, popfirst!(post.inqueue))
    end
    return false
end

# Background receiver loop: deserialize incoming datagrams into inqueue.
function arrivals(post::UDPPostOffice)
    try
        while !post.stopped
            rawmsg = recv(post.socket) # TODO: this blocks, so we will only exit if an extra message comes in after stopping
            stream = IOBuffer(rawmsg)
            msg = deserialize(stream)
            @debug "Postoffice got message $msg"
            push!(post.inqueue, msg)
        end
    catch e
        if !(e isa EOFError)
            # NOTE(review): `@info "...", e` logs a tuple; possibly meant
            # `@info "..." exception = e` — verify.
            @info "Exception in arrivals", e
        end
    end
end

function send(post::UDPPostOffice, msg::AbstractMsg)
    remoteroutes(post, nothing, msg)
end

# Serialize `msg` and send it to the ip:port encoded in the target's postcode.
# Returns true on success, false (after logging) on failure.
@inline function CircoCore.remoteroutes(post::UDPPostOffice, scheduler, msg::AbstractMsg)::Bool
    @debug "PostOffice delivery at $(postcode(post)): $msg"
    try
        parts = split(postcode(target(msg)), ":")
        ip = parse(IPAddr, parts[1])
        port = parse(UInt16, parts[2])
        io = IOBuffer()
        serialize(io, msg)
        Sockets.send(post.outsocket, ip, port, take!(io))
    catch e
        @error "Unable to send $msg" exception = (e, catch_backtrace())
        return false
    end
    return true
end
end # module
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 4269 | # SPDX-License-Identifier: MPL-2.0
module ZMQPostOffices

using Plugins
using ..CircoCore
import ..CircoCore: AbstractCoreState
using Serialization, Sockets
using DataStructures
import ZMQ

# Inter-node transport based on ZeroMQ PUSH/PULL sockets. Incoming messages
# are read on a background task into `inqueue` and handed to the scheduler in
# letin_remote; outgoing messages are serialized and pushed through per-target
# sockets cached in `outsockets`.
mutable struct ZMQPostOffice <: CircoCore.PostOffice
    outsockets::Dict{PostCode, ZMQ.Socket} # one PUSH socket per target postcode
    inqueue::Deque{Any}
    stopped::Bool
    ip::IPv4
    postcode::PostCode # set later, in setup!
    socket::ZMQ.Socket # the bound PULL socket, set later, in setup!
    intask # background receiver task, set in schedule_start
    ZMQPostOffice(;zmq_postoffice_ip::Union{IPv4, Nothing}=nothing, _...) = begin
        if isnothing(zmq_postoffice_ip)
            zmq_postoffice_ip = haskey(ENV, "ZMQ_POSTOFFICE_IP") ? IPv4(ENV["ZMQ_POSTOFFICE_IP"]) : getipaddr() # TODO support ipv6
        end
        return new(Dict(), Deque{Any}(), false, zmq_postoffice_ip)
    end
end

# Core-state field initializer: an actor's Addr combines the node's postcode
# with the actor id.
addrinit() = nulladdr
addrinit(scheduler, actor, actorid) = Addr(postcode(scheduler.plugins[:postoffice]), actorid)
Plugins.customfield(::ZMQPostOffice, ::Type{AbstractCoreState}) = Plugins.FieldSpec("addr", Addr, addrinit)

__init__() = Plugins.register(ZMQPostOffice)

CircoCore.postcode(post::ZMQPostOffice) = post.postcode
CircoCore.addr(post::ZMQPostOffice) = Addr(postcode(post), 0)

# Bind a PULL socket on the first free port of PORT_RANGE at `ip`.
function allocate_postcode(ip)
    socket = ZMQ.Socket(ZMQ.PULL)
    for port in CircoCore.PORT_RANGE
        try
            buf = IOBuffer()
            print(buf, ip)
            ipstr = String(take!(buf))
            postcode = "$(ipstr):$port"
            ZMQ.bind(socket, "tcp://" * postcode)
            @info "ZMQPostOffice bound to $postcode"
            return postcode, socket
        catch e
            isa(e, ZMQ.StateError) || rethrow() # port in use -> try the next one
        end
    end
    throw(PostException("No available port found for a Post Office"))
end

CircoCore.setup!(post::ZMQPostOffice, scheduler) = begin
    postcode, socket = allocate_postcode(post.ip)
    post.postcode = postcode
    post.socket = socket
end

CircoCore.schedule_start(post::ZMQPostOffice, scheduler) = begin
    post.intask = @async arrivals(post) # TODO errors throwed here are not logged
end

CircoCore.schedule_stop(post::ZMQPostOffice, scheduler) = begin
    post.stopped = true
    yield()
end

CircoCore.shutdown!(post::ZMQPostOffice, scheduler) = begin
    close(post.socket)
    for socket in values(post.outsockets)
        ZMQ.close(socket)
    end
end

# Move at most 30 queued remote messages into the scheduler per call.
@inline CircoCore.letin_remote(post::ZMQPostOffice, scheduler::AbstractScheduler)::Bool = begin
    for i = 1:min(length(post.inqueue), 30)
        CircoCore.deliver!(scheduler, popfirst!(post.inqueue))
    end
    return false
end

# Background receiver loop: deserialize incoming messages into inqueue.
function arrivals(post::ZMQPostOffice)
    try
        while !post.stopped
            message = recv(post.socket)
            stream = convert(IOStream, message)
            seek(stream, 0)
            msg = deserialize(stream)
            push!(post.inqueue, msg)
        end
    catch e
        if !(e isa EOFError) # EOF is how closing the socket interrupts recv
            @error "Exception in arrivals" exception = (e, catch_backtrace())
        end
    end
end

# Create, connect and cache a PUSH socket for the given target postcode.
function createsocket!(post::ZMQPostOffice, targetpostcode::PostCode)
    socket = ZMQ.Socket(ZMQ.PUSH)
    socketstr = "tcp://" * targetpostcode
    ZMQ.connect(socket, socketstr)
    post.outsockets[targetpostcode] = socket
    return socket
end
createsocket!(post::ZMQPostOffice, target::Addr) = createsocket!(post, postcode(target))

# Return the cached PUSH socket for `target`, creating it on first use.
@inline function getsocket(post::ZMQPostOffice, target::Addr)
    socket = get(post.outsockets, postcode(target), nothing)
    if isnothing(socket)
        return createsocket!(post, target)
    end
    return socket
end

function send(post::ZMQPostOffice, msg::AbstractMsg)
    remoteroutes(post, nothing, msg)
end

# BUGFIX: was typed `post::PostOffice`, which claimed this module-local helper
# for every PostOffice implementation; narrowed to the type it actually serves
# (matching getsocket below).
@inline function send(post::ZMQPostOffice, message)
    socket = getsocket(post, target(message))
    io = IOBuffer()
    serialize(io, message)
    ZMQ.send(socket, ZMQ.Message(io))
end

# Serialize `msg` and push it to the target node. Returns true on success,
# false (after logging) on failure.
@inline function CircoCore.remoteroutes(post::ZMQPostOffice, scheduler, msg::AbstractMsg)::Bool
    @debug "PostOffice delivery at $(postcode(post)): $msg"
    try
        socket = getsocket(post, target(msg))
        io = IOBuffer()
        serialize(io, msg)
        ZMQ.send(socket, ZMQ.Message(io))
    catch e
        @error "Unable to send $msg" exception = (e, catch_backtrace())
        # BUGFIX: removed leftover debug `error(42)`, which made the
        # `return false` below unreachable and turned a logged send failure
        # into a crash (the UDP post office returns false here too).
        return false
    end
    return true
end

end # module
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 2894 | # SPDX-License-Identifier: MPL-2.0
# A Zygote creates a Cell which then repeatedly reincarnates and
# sends back notifications from lifecycle callbacks.
# Both die after DEPTH incarnations.
using Test
using CircoCore
using Plugins
const DEPTH = 10
# Root test actor: spawns the first Cell and counts lifecycle notifications.
mutable struct Zygote <: Actor{Any}
    cell_incarnation_count::Int
    cell_spawn_count::Int
    cell_death_count::Int
    core::Any
    Zygote() = new(0, 0, 0)
end
# Cell carries its incarnation depth in the type parameter (a Val instance).
mutable struct Cell{T} <: Actor{Any}
    zygote::Addr
    core::Any
    Cell{T}(zygote) where T = new{T}(zygote)
end
CircoCore.onmessage(me::Zygote, ::OnSpawn, service) = begin
    child = spawn(service, Cell{Val(1)}(addr(me)))
    me.cell_incarnation_count = 1
    send(service, me, child, Reincarnate())
end
struct Reincarnate end
# Extract the depth integer from the Val type parameter.
getval(v::Val{V}) where V = V
getval(c::Cell{T}) where T = getval(T)
# Reincarnate into a Cell one level deeper, or die after DEPTH incarnations.
CircoCore.onmessage(me::Cell, ::Reincarnate, service) = begin
    depth = getval(me)
    if depth >= DEPTH
        die(service, me)
    else
        reincarnated = Cell{Val(depth + 1)}(me.zygote)
        @test become(service, me, reincarnated) == addr(me) # become must reuse the old address
        send(service, reincarnated, addr(me), Reincarnate())
    end
end
struct Reincarnated
    addr::Addr
end
# OnBecome is delivered to the old incarnation; it reports to the zygote.
CircoCore.onmessage(me::Cell, msg::OnBecome, service) = begin
    @test msg.reincarnation isa Cell
    @test getval(me) + 1 == getval(msg.reincarnation)
    send(service, me, me.zygote, Reincarnated(addr(me)))
end
CircoCore.onmessage(me::Zygote, ::Reincarnated, service) = begin
    me.cell_incarnation_count += 1
end
struct Spawned
    addr::Addr
end
CircoCore.onmessage(me::Cell, ::OnSpawn, service) = begin
    send(service, me, me.zygote, Spawned(me))
end
CircoCore.onmessage(me::Zygote, msg::Spawned, service) = begin
    me.cell_spawn_count += 1
end
struct Died
    addr::Addr
end
CircoCore.onmessage(me::Cell, ::OnDeath, service) = begin
    send(service, me, me.zygote, Died(me))
end
# When the cell line died, the zygote dies too and asks the scheduler to exit.
CircoCore.onmessage(me::Zygote, msg::Died, service) = begin
    me.cell_death_count += 1
    die(service, me; exit = true)
end
# Plugin that counts actor_spawning hook invocations per actor id.
struct LifecyclePlugin <: Plugin
    actor_spawning_calls::Dict{ActorId, Int}
    LifecyclePlugin(;options...) = new(Dict())
end
Plugins.symbol(::LifecyclePlugin) = :lifecycle
Plugins.register(LifecyclePlugin)
function CircoCore.actor_spawning(p::LifecyclePlugin, scheduler, actor)
    count = get(p.actor_spawning_calls, box(actor), 0)
    p.actor_spawning_calls[box(actor)] = count + 1
end
@testset "Actor Lifecycle" begin
    ctx = CircoContext(target_module = @__MODULE__; userpluginsfn = (;options...) -> [LifecyclePlugin])
    zygote = Zygote()
    scheduler = Scheduler(ctx, [zygote])
    wait(run!(scheduler; remote = false))
    # The first Cell spawn plus DEPTH-1 reincarnations.
    @test zygote.cell_incarnation_count == DEPTH
    # Only the first incarnation gets OnSpawn, and only the last one dies.
    @test zygote.cell_spawn_count == 1
    @test zygote.cell_death_count == 1
    @test scheduler.plugins[:lifecycle].actor_spawning_calls[box(zygote)] == 1
    shutdown!(scheduler)
end
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 608 | using Test
using CircoCore
struct Actor1 <: Actor{Any}
    core
end
# Exercises the Dict-like interface of CircoCore.ActorStore.
@testset "ActorStore" begin
    s = CircoCore.ActorStore(UInt64(1) => Actor1(1), UInt64(2) => Actor1(2))
    @test length(s) == 2
    @test haskey(s, UInt64(1))
    @test s[UInt64(1)].core == 1
    @test get(s, UInt64(2), nothing).core == 2
    for p in s
        @test p isa Pair{UInt64, Actor1}
    end
    for a in values(s)
        @test a isa Actor1
    end
    delete!(s, UInt64(1))
    @test !haskey(s, UInt64(1))
    @test length(s) == 1
    s[UInt64(1)] = Actor1(42)
    @test s[UInt64(1)].core == 42
    @test length(s) == 2
end
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 1976 | # SPDX-License-Identifier: MPL-2.0
# This test builds a binary tree of TreeActors, growing a new level for every
# Start message received by the TreeCreator.
# The growth of every leaf is reported back by its parent to the
# TreeCreator, which counts the nodes in the tree.
using Test
using CircoCore
import CircoCore.onmessage
struct Start end
# Request to grow the tree by one level; carries the creator for reporting.
struct GrowRequest
    creator::Addr
end
struct GrowResponse
    leafsgrown::Int
end
mutable struct TreeActor{TCore} <: Actor{TCore}
    left::Addr
    right::Addr
    core::TCore
end
TreeActor(core) = TreeActor(Addr(), Addr(), core)
# A leaf grows two children and reports; inner nodes forward the request down.
function onmessage(me::TreeActor, message::GrowRequest, service)
    if CircoCore.isnulladdr(me.left)
        me.left = spawn(service, TreeActor(emptycore(service)))
        me.right = spawn(service, TreeActor(emptycore(service)))
        send(service, me, message.creator, GrowResponse(2))
    else
        send(service, me, me.left, message)
        send(service, me, me.right, message)
    end
end
mutable struct TreeCreator{TCore} <: Actor{TCore}
    nodecount::Int64
    root::Addr
    core::TCore
end
TreeCreator(core) = TreeCreator(0, Addr(), core)
# Lazily create the root on first Start, then ask the tree to grow one level.
function onmessage(me::TreeCreator, ::Start, service)
    if CircoCore.isnulladdr(me.root)
        me.root = spawn(service, TreeActor(emptycore(service)))
        me.nodecount = 1
    end
    send(service, me, me.root, GrowRequest(addr(me)))
end
function onmessage(me::TreeCreator, message::GrowResponse, service)
    me.nodecount += message.leafsgrown
end
@testset "Actor" begin
@testset "Actor-Tree" begin
ctx = CircoContext(;target_module=@__MODULE__)
creator = TreeCreator(emptycore(ctx))
scheduler = Scheduler(ctx, [creator])
scheduler(;remote = false) # to spawn the zygote
for i in 1:17
send(scheduler, creator, Start())
@time scheduler(;remote = false)
@test creator.nodecount == 2^(i+1)-1
end
shutdown!(scheduler)
end
end
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 1804 | module ClassicTest
using ActorInterfaces.Classic
export Pop, Push, StackNode, TestCoordinator
# Classic actor-model stack example: each StackNode actor holds one element.
struct Pop
    customer::Addr
end
struct Push
    content
end
struct StackNode
    content
    link::Union{Addr, Nothing} # next node down, or nothing at the bottom
end
# After a Pop, the popped node becomes a forwarder to the rest of the stack.
struct Forwarder
    target::Addr
end
@ctx function Classic.onmessage(me::Forwarder, msg)
    send(me.target, msg)
end
# Push: spawn a copy of ourselves as the new link, then hold the new content.
@ctx function Classic.onmessage(me::StackNode, msg::Push)
    p = spawn(StackNode(me.content, me.link))
    become(StackNode(msg.content, p))
end
# Pop: hand out our content, and (if not the bottom) forward future traffic.
@ctx function Classic.onmessage(me::StackNode, msg::Pop)
    if !isnothing(me.link)
        become(Forwarder(me.link))
    end
    send(msg.customer, me.content)
end
# Collects received messages for assertions in the test body.
struct TestCoordinator
    received::Vector{Any}
end
@ctx function Classic.onmessage(me::TestCoordinator, msg)
    push!(me.received, msg)
end
end # module
import CircoCore
using Test
using .ClassicTest
@testset "Stack" begin
ctx = CircoCore.CircoContext(;target_module=@__MODULE__)
s = CircoCore.Scheduler(ctx)
CircoCore.run!(s)
stack = StackNode(nothing, nothing)
stackaddr = CircoCore.spawn(s, stack)
coordinator = TestCoordinator([])
coordaddr = CircoCore.spawn(s, coordinator)
CircoCore.send(s, stackaddr, Push(42))
CircoCore.send(s, stackaddr, Push(43))
@test length(coordinator.received) == 0
CircoCore.send(s, stackaddr, Pop(coordaddr))
sleep(0.05)
@test coordinator.received == Any[43]
CircoCore.send(s, stackaddr, Pop(coordaddr))
sleep(0.05)
@test coordinator.received == Any[43, 42]
CircoCore.send(s, stackaddr, Pop(coordaddr))
sleep(0.05)
@test coordinator.received == Any[43, 42, nothing]
CircoCore.send(s, stackaddr, Pop(coordaddr))
sleep(0.05)
@test coordinator.received == Any[43, 42, nothing, nothing]
CircoCore.shutdown!(s)
end
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 2713 | # SPDX-License-Identifier: MPL-2.0
using Test
using CircoCore
import CircoCore: onmessage
const TARGET_COUNT = 13
const EVENT_COUNT = 133
struct Start end
# Event without a topic: delivered to all subscribers of its type.
struct NonTopicEvent <: Event
    value::String
end
# Topic-carrying event: subscribers can filter by topic or predicate.
struct TopicEvent <: Event
    topic::String
    value::String
end
mutable struct TestEventSource{TCore} <: Actor{TCore}
    eventdispatcher::Addr
    core::TCore
end
TestEventSource(core) = TestEventSource(Addr(), core)
CircoCore.traits(::Type{<:TestEventSource}) = (EventSource,)
mutable struct EventTarget{TCore} <: Actor{TCore}
    received_nontopic_count::Int64
    received_topic_count::Int64
    core::TCore
end
EventTarget(core) = EventTarget(0, 0, core)
function onmessage(me::TestEventSource, ::OnSpawn, service)
    registername(service, "eventsource", me)
end
# Subscribe to all NonTopicEvents, two concrete topics, and one
# predicate-filtered topic.
function onmessage(me::EventTarget, ::OnSpawn, service)
    eventsource = getname(service, "eventsource")
    send(service, me, eventsource, Subscribe(NonTopicEvent, addr(me)))
    send(service, me, eventsource, Subscribe(TopicEvent, addr(me), "topic3"))
    send(service, me, eventsource, Subscribe(TopicEvent, addr(me), "topic4"))
    send(service, me, eventsource, Subscribe(TopicEvent, addr(me), event -> event.topic == "topic5"))
end
# Fire EVENT_COUNT events of each kind, with topics topic1..topicN.
function onmessage(me::TestEventSource, message::Start, service)
    for i=1:EVENT_COUNT
        fire(service, me, NonTopicEvent("Test event #$i"))
        fire(service, me, TopicEvent("topic$i", "Topic event #$i"))
    end
end
function onmessage(me::EventTarget, message::NonTopicEvent, service)
    me.received_nontopic_count += 1
end
function onmessage(me::EventTarget, message::TopicEvent, service)
    me.received_topic_count += 1
end
@testset "Event" begin
ctx = CircoContext(;target_module=@__MODULE__)
source = TestEventSource(emptycore(ctx))
targets = [EventTarget(emptycore(ctx)) for i=1:TARGET_COUNT]
scheduler = Scheduler(ctx, [source; targets])
scheduler(;remote = false) # to spawn the zygote
send(scheduler, source, Start())
scheduler(;remote = false)
for target in targets
@test target.received_nontopic_count == EVENT_COUNT
@test target.received_topic_count == 3
end
# unsubscribe and rerun
send(scheduler, source, UnSubscribe(addr(source), TopicEvent))
send(scheduler, source, Start())
scheduler(;remote = false)
for target in targets
@test target.received_nontopic_count == 2 * EVENT_COUNT
@test target.received_topic_count == 3
end
send(scheduler, source, SigTerm())
@test CircoCore.is_scheduled(scheduler, source.eventdispatcher)
scheduler(;remote = false)
@test !CircoCore.is_scheduled(scheduler, source.eventdispatcher)
shutdown!(scheduler)
end
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 2453 | # SPDX-License-Identifier: MPL-2.0
using Test, Printf
using CircoCore
# An actor that exchanges Ping/Pong messages with its peer and counts rounds.
mutable struct PingPonger{TCore} <: Actor{TCore}
    peer::Union{Addr, Nothing}
    pings_sent::Int64
    pongs_got::Int64
    core::TCore
end
PingPonger(peer, core) = PingPonger(peer, 0, 0, core)
struct Ping end
struct Pong end
struct CreatePeer end
@inline function sendping(service, me::PingPonger)
    send(service, me, me.peer, Ping())
    me.pings_sent += 1
end
@inline function sendpong(service, me::PingPonger)
    send(service, me, me.peer, Pong())
end
# Spawn the peer and start the ping-pong round trip.
function CircoCore.onmessage(me::PingPonger, message::CreatePeer, service)
    peer = PingPonger(addr(me), emptycore(service))
    me.peer = spawn(service, peer)
    sendping(service, me)
end
@inline function CircoCore.onmessage(me::PingPonger, ::Ping, service)
    sendpong(service, me)
end
@inline function CircoCore.onmessage(me::PingPonger, ::Pong, service)
    me.pongs_got += 1
    sendping(service, me)
end
const PINGER_PARALLELISM = 1
@testset "PingPong" begin
    ctx = CircoContext(;target_module=@__MODULE__, profile=CircoCore.Profiles.MinimalProfile(),
        userpluginsfn=()->[CircoCore.PostOffice])
    pingers = [PingPonger(nothing, emptycore(ctx)) for i=1:PINGER_PARALLELISM]
    scheduler = Scheduler(ctx, pingers)
    scheduler(;remote = false) # to spawn the zygote
    for pinger in pingers
        send(scheduler, pinger, CreatePeer())
    end
    schedulertask = @async scheduler(; remote = false)
    @info "Sleeping to allow ping-pong to start."
    sleep(3.0)
    for pinger in pingers
        @test pinger.pings_sent > 1e3
        @test pinger.pongs_got > 1e3
    end
    # NOTE(review): the message says 10 secs but the sleep below is 4.0 —
    # one of them should be updated.
    @info "Measuring ping-pong performance (10 secs)"
    startpingcounts = [pinger.pings_sent for pinger in pingers]
    startts = Base.time_ns()
    sleep(4.0)
    rounds_made = sum([pingers[i].pings_sent - startpingcounts[i] for i=1:length(pingers)])
    wall_time_used = Base.time_ns() - startts
    for pinger in pingers
        @test pinger.pings_sent > 1e3
        @test pinger.pongs_got > 1e3
    end
    shutdown!(scheduler)
    sleep(0.001)
    # After shutdown no more pings may be sent.
    endpingcounts = [pinger.pings_sent for pinger in pingers]
    sleep(0.1)
    for i = 1:length(pingers)
        @test pingers[i].pongs_got in [pingers[i].pings_sent, pingers[i].pings_sent - 1]
        @test endpingcounts[i] === pingers[i].pings_sent
    end
    @printf "In-thread ping-pong performance: %f rounds/sec\n" (rounds_made / wall_time_used * 1e9)
end
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 384 | # SPDX-License-Identifier: MPL-2.0
# julia -i -q --color=yes --project revise.jl example
using Revise, Jive
using CircoCore

# Rerun the whole test suite (minus this watcher) whenever a watched
# CircoCore source file changes.
function onsourcechange(path)
    printstyled("changed ", color=:cyan)
    println(path)
    revise()
    runtests(@__DIR__, skip=["revise.jl"])
end

watch(onsourcechange, @__DIR__, sources=[pathof(CircoCore)])
onsourcechange("")

# Keep the process alive when not started interactively.
Base.JLOptions().isinteractive==0 && wait()
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 113 | # SPDX-License-Identifier: MPL-2.0
using Jive
# Discover and run every test file under this directory, except the Revise
# watcher helper and the remote-send test. NOTE(review): remotesend spawns
# a child julia process — presumably skipped here for that reason.
runtests(@__DIR__, skip=["revise.jl", "remotesend/remotesend.jl"])
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 3464 | # SPDX-License-Identifier: MPL-2.0
using Test
using CircoCore
using Plugins
# Inert message; used only to verify that the scheduler drains its queue.
struct StartMsg end

# Asks the Dummy actor to die; `exit` tells the scheduler whether it may
# stop once no user actors remain.
struct Die
    exit::Bool
end

# Test actor that records whether it has received a Die message.
mutable struct Dummy <: Actor{Any}
    core::Any
    diemessagearrived::Bool
    Dummy(core) = new(core, false)
end

# Die: flag the arrival, then remove the actor from the scheduler,
# propagating the requested exit behavior.
function CircoCore.onmessage(me::Dummy, msg::Die, service)
    me.diemessagearrived = true
    die(service, me; exit = msg.exit)
end
# Build `numberOfActor` fresh Dummy actors for the given context.
# `Any[...]` preserves the historical Vector{Any} eltype that
# validateActors expects. (Replaces a manual while-decrement loop that
# would never terminate for negative input; now a non-positive count
# simply yields an empty vector.)
function createDummyActors(numberOfActor, ctx)
    return Any[Dummy(emptycore(ctx)) for _ in 1:numberOfActor]
end
# Assert that every actor's `diemessagearrived` flag equals `expectedValue`.
# (The previous `mapreduce(*, ...) == expectedValue` form only proved
# "at least one actor is false" when expectedValue was false; `all`
# enforces the documented intent that every actor holds the same value.)
function validateActors(actors::Vector{Any}, expectedValue::Bool)
    @test all(a -> a.diemessagearrived == expectedValue, actors)
end
@testset "Scheduler" begin
    @testset "Scheduler with remote = false and exit = false" begin
        ctx = CircoContext(target_module = @__MODULE__) # fixed duplicated `ctx = ctx =` typo
        dummy = Dummy(emptycore(ctx))
        sdl = Scheduler(ctx, [])
        spawn(sdl, dummy)

        @test isempty(sdl.msgqueue)
        @test sdl.actorcount >= sdl.startup_actor_count
        @test dummy.diemessagearrived == false

        # The message is queued but not processed until the scheduler runs.
        send(sdl, dummy, StartMsg())
        @test !isempty(sdl.msgqueue)

        sdl(;remote = false)

        # Queue drained; the dummy survives because it got no Die message.
        @test isempty(sdl.msgqueue)
        @test sdl.actorcount >= sdl.startup_actor_count
        @test dummy.diemessagearrived == false

        shutdown!(sdl)
    end
    @testset "Scheduler with remote false and exiting true" begin
        ctx = CircoContext(target_module = @__MODULE__) # fixed duplicated `ctx = ctx =` typo
        dummy = Dummy(emptycore(ctx))
        sdl = Scheduler(ctx, [])
        spawn(sdl, dummy)

        @test isempty(sdl.msgqueue)
        @test sdl.actorcount >= sdl.startup_actor_count
        @test dummy.diemessagearrived == false

        send(sdl, dummy, Die(true))
        @test !isempty(sdl.msgqueue)

        sdl(;remote = false)

        # The dummy died, leaving only the scheduler's own startup actors.
        @test isempty(sdl.msgqueue)
        @test sdl.actorcount == sdl.startup_actor_count
        @test dummy.diemessagearrived == true

        shutdown!(sdl)
    end
    @testset "Scheduler with remote true and exiting true, more Actor" begin
        ctx = CircoContext(target_module = @__MODULE__)
        actors = createDummyActors(10, ctx)
        finishedSignal = Channel(2) # equivalent to the old `Channel{}(2)` (Channel{Any})
        sdl = Scheduler(ctx, [])
        map(a -> spawn(sdl, a), actors)
        @async begin
            sdl(;remote = true) # returns only when all spawned actors have died
            @test isempty(sdl.msgqueue)
            @test sdl.actorcount == sdl.startup_actor_count
            # if every body got the Die message that means the scheduler didn't exited when got the first exit = true "call"
            validateActors(actors, true)
            put!(finishedSignal, true)
        end

        @test isempty(sdl.msgqueue)
        @test sdl.actorcount >= sdl.startup_actor_count
        validateActors(actors, false)

        # Kill the actors one by one; actorcount must shrink accordingly.
        for index in eachindex(actors)
            dummy = actors[index]
            send(sdl, dummy, Die(true))
            sleep(0.8)
            @test isempty(sdl.msgqueue)
            @test sdl.actorcount + index - length(actors) == sdl.startup_actor_count
            @test dummy.diemessagearrived == true
        end
        @test take!(finishedSignal)
        shutdown!(sdl)
    end
end
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 596 | module SignalTest
using Test
using CircoCore
# Bare actor with an untyped core field; exists only to receive a SigTerm.
mutable struct SigTest <: Actor{Any}
    core
end
# A SigTerm message must unschedule the actor and shrink the actor count.
@testset "SigTerm" begin
    ctx = CircoContext(;target_module=@__MODULE__)
    victim = SigTest(emptycore(ctx))
    sdl = Scheduler(ctx, [victim])
    sdl(;remote = false)
    @test CircoCore.is_scheduled(sdl, victim) == true
    send(sdl, victim, SigTerm())
    count_before = sdl.actorcount
    sdl(;remote = false)
    @test CircoCore.is_scheduled(sdl, victim) == false
    @test sdl.actorcount == count_before - 1
    shutdown!(sdl)
end
end
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 996 | # SPDX-License-Identifier: MPL-2.0
using Test
using CircoCore
using Plugins
# Stand-in for the real scheduler: provides only the two sparse-activity
# hooks, each of which bumps a counter on the mock actor it receives.
struct SchedulerMock
    hooks
    SchedulerMock() = new((
        actor_activity_sparse16 = (scheduler, actor) -> actor.count16 += 1,
        actor_activity_sparse256 = (scheduler, actor) -> actor.count256 += 1
    ))
end

# NOTE(review): HooksMock appears unused in this file — confirm before removing.
struct HooksMock
    actor_activity_sparse16
    actor_activity_sparse256
end

# Records how often each sparse hook fired for this "actor".
mutable struct ActorMock
    count16::Int
    count256::Int
end
const NUM_SAMPLES = 20000

# Statistical smoke test: over NUM_SAMPLES deliveries the sparse16 hook
# should fire roughly 1/16 of the time and sparse256 roughly 1/256.
@testset "SparseActivity" begin
    scheduler = SchedulerMock()
    as = CircoCore.Activity.SparseActivityImpl()
    actor = ActorMock(0, 0)
    for i = 1:NUM_SAMPLES
        CircoCore.localdelivery(as, scheduler, nothing, actor)
    end
    println("sparse16: $(actor.count16 / NUM_SAMPLES) vs $(1 / 16)")
    println("sparse256: $(actor.count256 / NUM_SAMPLES) vs $(1 / 256)")
    # Loose absolute tolerances: this checks order of magnitude, not an
    # exact distribution.
    @test isapprox(actor.count16 / NUM_SAMPLES, 1 / 16; atol = 0.06)
    @test isapprox(actor.count256 / NUM_SAMPLES, 1 / 256; atol = 0.03)
end
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 2317 | # SPDX-License-Identifier: MPL-2.0
module TokenTest
using Test
using Dates
using CircoCore
# Number of requests fired at the responder; divisible by 3 so that the
# reply/failure/timeout outcomes split into equal thirds (67 each).
# Now `const`: the value is never reassigned, and non-const module globals
# defeat type inference in the handlers that read it.
const MESSAGE_COUNT = 201

# Request carrying a freshly minted token so replies, failures and
# timeouts can be matched back to it.
struct TRequest <: Request
    id::UInt64
    token::Token
    TRequest(id) = new(id, Token())
end
# Sends MESSAGE_COUNT requests and tallies the outcome of each one.
mutable struct Requestor{TCore} <: Actor{TCore}
    replycount::Int
    failurecount::Int
    timeoutcount::Int
    responder::Addr    # resolved by name on spawn
    core::TCore
    Requestor(core) = new{typeof(core)}(0, 0, 0, Addr(), core)
end

# Successful response: echoes the request id and token.
struct TReply <: Response
    requestid::UInt64
    token::Token
end

# Failure response: same shape as TReply, counted separately on arrival.
struct TFailure <: Failure
    requestid::UInt64
    token::Token
end

# Answers requests according to id % 3 (see its TRequest handler).
mutable struct Responder{TCore} <: Actor{TCore}
    core::TCore
end
# On spawn the responder publishes itself under the TRequest type name so
# the requestor can look it up.
CircoCore.onmessage(me::Responder, ::OnSpawn, service) = begin
    registername(service, string(TRequest), me)
end

# On spawn the requestor registers its own name, resolves the responder,
# and fires all MESSAGE_COUNT requests, each with a 2-second timeout.
CircoCore.onmessage(me::Requestor, ::OnSpawn, service) = begin
    registername(service, "requestor", me)
    me.responder = getname(service, string(TRequest))
    for i=1:MESSAGE_COUNT
        send(service, me, me.responder, TRequest(i); timeout = 2.0)
    end
end
# Reply to ids ≡ 1 (mod 3), fail ids ≡ 2 (mod 3), and stay silent on the
# rest so they time out at the requestor. Self-destruct after the last id.
CircoCore.onmessage(me::Responder, req::TRequest, service) = begin
    remainder = req.id % 3
    if remainder == 1
        send(service, me, getname(service, "requestor"), TReply(req.id, req.token))
    elseif remainder == 2
        send(service, me, getname(service, "requestor"), TFailure(req.id, req.token))
    end
    req.id == MESSAGE_COUNT && die(service, me)
end
# Count successful replies.
CircoCore.onmessage(me::Requestor, resp::TReply, service) = begin
    me.replycount += 1
end

# Count failures.
CircoCore.onmessage(me::Requestor, resp::TFailure, service) = begin
    me.failurecount += 1
end

# Count timeouts; once the expected third of the requests have timed out,
# exit so the scheduler can stop.
CircoCore.onmessage(me::Requestor, timeout::Timeout, service) = begin
    me.timeoutcount += 1
    if me.timeoutcount == MESSAGE_COUNT / 3
        println("Got $(me.timeoutcount) timeouts, exiting.")
        die(service, me; exit=true)
    end
end
@testset "Token" begin
    ctx = CircoContext(;target_module=@__MODULE__)
    requestor = Requestor(emptycore(ctx))
    responder = Responder(emptycore(ctx))
    scheduler = Scheduler(ctx, [responder, requestor])
    scheduler(;remote=true) # runs until the requestor exits after its timeouts
    @test requestor.responder == addr(responder)
    # Ids are split evenly three ways by the responder's TRequest handler.
    @test requestor.replycount == MESSAGE_COUNT / 3
    @test requestor.failurecount == MESSAGE_COUNT / 3
    # Every pending timeout token must have been consumed or cleaned up.
    @test length(scheduler.tokenservice.timeouts) == 0
    shutdown!(scheduler)
end
end
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 1650 | module TraitTest
using CircoCore
using Test
# Message hierarchy used to exercise trait-based dispatch.
abstract type MyMsg end

# Matched by no trait handler; only the actor's own onmessage sees it.
struct NonTraitedMsg <: MyMsg
    a
end

# Matched by all three trait handlers, then by the actor itself.
struct AllTraitMsg <: MyMsg
    a
end

# Matched only by Trait2's handler, then by the actor itself.
struct SecondTraitMsg <: MyMsg
    a
end

struct Trait1 end
# Stateful trait: `config` is available to its handler.
struct Trait2
    config::Int
end
struct Trait3 end

# Records every (handler, message) entry in the order handlers ran.
mutable struct TraitTester <: Actor{Any}
    gotmessages
    core
    TraitTester() = new([])
end
# Attach the traits; note Trait2 is given as an *instance* carrying
# config 42, while Trait1/Trait3 are given as types.
CircoCore.traits(::Type{TraitTester}) = (Trait1, Trait2(42), Trait3)

# Every trait logs AllTraitMsg.
CircoCore.ontraitmessage(t::Union{Trait1, Trait2, Trait3}, me, msg::AllTraitMsg, service) = begin
    push!(me.gotmessages, (typeof(t), msg))
end

# Only Trait2 logs SecondTraitMsg, including its config value.
CircoCore.ontraitmessage(t::Trait2, me, msg::SecondTraitMsg, service) = begin
    push!(me.gotmessages, (typeof(t), t.config, msg))
end

# The actor's own handler (logged as Nothing); the testset below shows it
# runs after the trait handlers.
CircoCore.onmessage(me::TraitTester, msg::MyMsg, service) = begin
    push!(me.gotmessages, (Nothing, msg))
end
@testset "Trait Order" begin
    ctx = CircoContext(;target_module=@__MODULE__)
    tester = TraitTester()
    scheduler = Scheduler(ctx, [tester])
    scheduler(;remote=false)
    send(scheduler, tester, SecondTraitMsg(2))
    send(scheduler, tester, AllTraitMsg(1))
    send(scheduler, tester, NonTraitedMsg(3))
    scheduler(;remote=false)
    # Expected log: for each message, matching trait handlers fire in
    # declaration order (Trait1, Trait2, Trait3), and the actor's own
    # onmessage (logged with Nothing) fires last.
    @test tester.gotmessages[1] == (Trait2, 42, SecondTraitMsg(2))
    @test tester.gotmessages[2] == (Nothing, SecondTraitMsg(2))
    @test tester.gotmessages[3] == (Trait1, AllTraitMsg(1))
    @test tester.gotmessages[4] == (Trait2, AllTraitMsg(1))
    @test tester.gotmessages[5] == (Trait3, AllTraitMsg(1))
    @test tester.gotmessages[6] == (Nothing, AllTraitMsg(1))
    @test tester.gotmessages[7] == (Nothing, NonTraitedMsg(3))
    shutdown!(scheduler)
end
end # module
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 149 | # SPDX-License-Identifier: MPL-2.0
# Shared fixtures for the remote-send test: the sender process and the
# receiving scheduler both include this file, so the values must agree.
# Declared `const` — they are never reassigned, and non-const globals are
# untyped, hurting inference in the send loop that reads them.
const REMOTE_TEST_PAYLOAD = "Sent remotely"
const MESSAGE_COUNT = 3752

# Payload message routed from the sender process to the Receiver actor.
struct TestMessage
    id::UInt64
    data::String
end
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 667 | using CircoCore
include("remotesend-base.jl")
# Context built at load time so sendtoremote can reuse it.
# NOTE(review): non-const global — acceptable for this one-shot sender script.
ctx = CircoContext(;target_module=@__MODULE__)
println(@__MODULE__)
"""
    sendtoremote(receiveraddress)

Fire MESSAGE_COUNT `TestMessage`s at `receiveraddress` in batches of 100,
pumping the local scheduler and sleeping briefly between batches so the
outgoing queue can drain. Ids now run 1..MESSAGE_COUNT: the old code
restarted the id at 1 for every batch, so the final id never reached
MESSAGE_COUNT and the receiver-side final-id check had to stay disabled.
"""
function sendtoremote(receiveraddress)
    scheduler = Scheduler(ctx)
    println("Sending out $MESSAGE_COUNT messages to $receiveraddress")
    @time begin
        sentout = 0
        while sentout < MESSAGE_COUNT
            batchsize = min(100, MESSAGE_COUNT - sentout)
            for i in 1:batchsize
                # Globally sequential id instead of a per-batch counter.
                send(scheduler, Addr(receiveraddress), TestMessage(sentout + i, REMOTE_TEST_PAYLOAD))
            end
            sentout += batchsize
            scheduler(;remote = false)
            sleep(0.1)
        end
    end
    println("Messages sent.")
    shutdown!(scheduler)
end
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
|
[
"MPL-2.0"
] | 0.2.21 | 693005fe9ead5e4a74af9fb943e7c37ff635ef08 | code | 1283 | using Test
using CircoCore
import CircoCore.onmessage
include("remotesend-base.jl")
# Accumulates incoming TestMessages; once MESSAGE_COUNT have arrived it
# exits, which lets the local scheduler return.
mutable struct Receiver{TCore} <: Actor{TCore}
    messages::Array{TestMessage}
    core::TCore
end
Receiver(core) = Receiver(TestMessage[], core)

# Store each message; request scheduler exit when the expected count is in.
function onmessage(me::Receiver, message::TestMessage, service)
    push!(me.messages, message)
    if length(me.messages) >= MESSAGE_COUNT
        die(service, me; exit = true)
    end
end
# Launch a child julia process that includes the sender script and fires
# sendtoremote at `receiveraddress`. Returns the process handle without
# waiting for completion; the child's output is forwarded to our streams.
function startsender(receiveraddress)
    dirprefix = endswith(pwd(), "test") ? "" : "test/"
    script = "include(\"$(dirprefix)remotesend/remotesend-sender.jl\");sendtoremote(\"$receiveraddress\")"
    sendercmd = Cmd(["julia", "--project", "-e", script])
    return run(pipeline(sendercmd; stdout = stdout, stderr = stderr); wait = false)
end
@testset "Remote Send" begin
    ctx = CircoContext(;target_module=@__MODULE__)
    receiver = Receiver(emptycore(ctx))
    scheduler = Scheduler(ctx)
    spawn(scheduler, receiver)
    # Launch the sender in a child process, then run the local scheduler
    # until the receiver has collected every message and exits.
    sender = startsender(addr(receiver))
    scheduler(;remote = false)
    wait(sender) # Do not print test results before sender exit logs
    @test length(receiver.messages) == MESSAGE_COUNT
    @test receiver.messages[end].data == REMOTE_TEST_PAYLOAD
    @test receiver.messages[1].id == 1
    # NOTE(review): disabled check — verify the sender assigns globally
    # sequential ids before re-enabling it.
    #@test receiver.messages[end].id == MESSAGE_COUNT
    shutdown!(scheduler)
end
| CircoCore | https://github.com/Circo-dev/CircoCore.jl.git |
Subsets and Splits