===== SmoothInterpolation v0.1.0 (MIT) | https://github.com/SouthEndMusic/SmoothInterpolation.jl.git =====

===== src/SmoothInterpolation.jl =====
module SmoothInterpolation
using DataInterpolations:
DataInterpolations, LinearInterpolation, AbstractInterpolation, munge_data, _interpolate
using FindFirstFunctions: searchsortedfirstcorrelated
using PrettyTables
include("cache.jl")
include("smoothed_linear_interpolation.jl")
include("integration_inverse.jl")
include("integration.jl")
include("derivatives.jl")
include("utils.jl")
export LinearInterpolationIntInv,
SmoothedLinearInterpolation,
SmoothedLinearInterpolationIntInv,
LinearInterpolation,
invert_integral
end # SmoothInterpolation
===== src/cache.jl =====
abstract type AbstractCache{T} end
"""
The cache object for LinearInterpolationIntInv computations.
"""
struct LinearInterpolationIntInvCache{uType, T} <: AbstractCache{T}
u::uType
slope::uType
degenerate_slope::Vector{Bool}
end
function LinearInterpolationIntInvCache(u, t)
Δu = diff(u)
Δt = diff(t)
slope = Δu ./ Δt
degenerate_slope = collect(isapprox.(slope, 0, atol = 1e-5))
return LinearInterpolationIntInvCache{typeof(u), eltype(u)}(u, slope, degenerate_slope)
end
"""
The cache object for SmoothedLinearInterpolation computations.
"""
struct SmoothedLinearInterpolationCache{uType, tType, λType <: Number, T} <:
AbstractCache{T}
u::uType
t::tType
Δu::uType
Δt::tType
ΔΔu::uType
ΔΔt::tType
u_tilde::uType
t_tilde::tType
linear_slope::uType
# Whether ΔΔt is sufficiently close to 0
degenerate_ΔΔt::Vector{Bool}
λ::λType
end
function SmoothedLinearInterpolationCache(u, t, λ)::SmoothedLinearInterpolationCache
Δu = diff(u)
Δt = diff(t)
@assert !any(iszero.(Δt))
pushfirst!(Δt, Δt[1])
push!(Δt, Δt[end])
pushfirst!(Δu, Δu[1])
push!(Δu, Δu[end])
ΔΔu = diff(Δu)
ΔΔt = diff(Δt)
u_tilde = get_spline_ends(u, Δu, λ)
t_tilde = get_spline_ends(t, Δt, λ)
linear_slope = Δu ./ Δt
# Whether ΔΔt is sufficiently close to 0
degenerate_ΔΔt = collect(isapprox.(ΔΔt, 0, atol = 1e-5))
return SmoothedLinearInterpolationCache{typeof(u), typeof(t), typeof(λ), eltype(u)}(
u,
t,
Δu,
Δt,
ΔΔu,
ΔΔt,
u_tilde,
t_tilde,
linear_slope,
degenerate_ΔΔt,
λ,
)
end
"""
The cache object for SmoothedLinearInterpolationIntInv computations.
"""
struct SmoothedLinearInterpolationIntInvCache{uType, T} <: AbstractCache{T}
# The degree of the polynomial whose roots need to be found
degree::Vector{Int}
# Quartic polynomial coefficients
c4::uType
c3::uType
c2::uType
c1::uType
# Coefficients of depressed quartic
p::uType
q::uType
# Whether Δu is sufficiently close to 0
degenerate_Δu::Vector{Bool}
end
function SmoothedLinearInterpolationIntInvCache(A)
coeffs =
hcat([collect(get_quartic_coefficients(A, idx)) for idx in eachindex(A.cache.t)]...)
c4, c3, c2, c1 = collect.(eachrow(coeffs))
# The degree is 5 minus the index of the first (≈) nonzero coefficient
degree = 5 .- findfirst.(coef -> !isapprox(coef, 0; atol = 1e-5), eachcol(coeffs))
p = p_coeff.(c4, c3, c2)
q = q_coeff.(c4, c3, c2, c1)
# Whether Δu is sufficiently close to 0
degenerate_Δu = collect(isapprox.(A.cache.Δu, 0, atol = 1e-5))
return SmoothedLinearInterpolationIntInvCache{typeof(A.u), eltype(A.u)}(
degree,
c4,
c3,
c2,
c1,
p,
q,
degenerate_Δu,
)
end
===== src/derivatives.jl =====
function DataInterpolations._derivative(
A::SmoothedLinearInterpolation{<:AbstractVector},
t::Number,
iguess,
)
(; u, t_tilde) = A.cache
# smallest idx such that A.t[idx] >= t
idx = searchsortedfirstcorrelated(A.t, t, iguess)
if idx == 1 || idx == length(u) + 1
# Linear extrapolation
A.cache.linear_slope[idx]
else
# Interpolation
if t < t_tilde[2 * idx - 2]
U_deriv(A, t, idx - 1)
elseif t > t_tilde[2 * idx - 1]
U_deriv(A, t, idx)
else
# Linear interpolation
A.cache.linear_slope[idx]
end
end
end
function DataInterpolations._derivative(A::AbstractInterpolationIntInv, V::Number, iguess)
t = A(V, iguess)
return 1 / forward_itp(A)(t)
end
===== src/integration.jl =====
"""
integrate_spline_section(A::SmoothedLinearInterpolation, idx::Number, t::Number)
Integrate the idx-th spline section from its lower time bound up to `t`.
## Arguments
- `A`: SmoothedLinearInterpolation object
- `idx`: Index of the spline section
- `t`: Upper integration bound
"""
function integrate_spline_section(A::SmoothedLinearInterpolation, idx::Number, t::Number)
s = S(A, t, idx)
c4, c3, c2, c1 = get_quartic_coefficients(A, idx)
return c4 * s^4 + c3 * s^3 + c2 * s^2 + c1 * s
end
function DataInterpolations._integral(
A::SmoothedLinearInterpolation,
idx::Number,
t::Number,
)
(; u_tilde, t_tilde) = A.cache
if t == A.t[idx]
return zero(eltype(A.u))
end
# smallest idx such that A.t[idx] >= t
idx = searchsortedfirstcorrelated(A.t, t, idx)
i = 2 * idx
u_tildeᵢ₋₃ = u_tilde[i - 3]
u_tildeᵢ₋₂ = u_tilde[i - 2]
t_tildeᵢ₋₃ = t_tilde[i - 3]
t_tildeᵢ₋₂ = t_tilde[i - 2]
# Integration of lower spline section
if idx == 2
# Special case of the first (half) spline section
# which is linear
if t <= t_tildeᵢ₋₂
u_t = A(t)
out = 0.5 * (t - t_tildeᵢ₋₃) * (u_t + u_tildeᵢ₋₃)
return out
else
out = 0.5 * (t_tildeᵢ₋₂ - t_tildeᵢ₋₃) * (u_tildeᵢ₋₂ + u_tildeᵢ₋₃)
end
elseif idx == length(A.t) + 1
# Special case of upper extrapolation
u_t = A(t)
out = 0.5 * (t - A.t[end - 1]) * (u_t + A.u[end - 1])
return out
else
if t <= t_tildeᵢ₋₂
out = integrate_spline_section(A, idx - 1, t)
out -= integrate_spline_section(A, idx - 1, A.t[idx - 1])
return out
else
out = integrate_spline_section(A, idx - 1, t_tildeᵢ₋₂)
out -= integrate_spline_section(A, idx - 1, A.t[idx - 1])
end
end
u_tildeᵢ₋₁ = u_tilde[i - 1]
t_tildeᵢ₋₁ = t_tilde[i - 1]
# Integration of linear section
if t <= t_tildeᵢ₋₁
u_t = A(t)
out += 0.5 * (t - t_tildeᵢ₋₂) * (u_t + u_tildeᵢ₋₂)
return out
else
out += 0.5 * (t_tildeᵢ₋₁ - t_tildeᵢ₋₂) * (u_tildeᵢ₋₁ + u_tildeᵢ₋₂)
end
# Integration of upper spline section
out += integrate_spline_section(A, idx, t)
return out
end
===== src/integration_inverse.jl =====
abstract type AbstractInterpolationIntInv{T} <: AbstractInterpolation{T} end
"""
LinearInterpolationIntInv(A::LinearInterpolation)
Invert the integral of a LinearInterpolation object, if possible. `A.u` must be non-negative.
## Arguments
- `A`: The LinearInterpolation object whose integral is inverted.
"""
struct LinearInterpolationIntInv{uType, tType, T} <: AbstractInterpolationIntInv{T}
u::uType
t::tType
cache::LinearInterpolationIntInvCache{uType}
extrapolate::Bool
function LinearInterpolationIntInv(u, t, cache, extrapolate)
return new{typeof(u), typeof(t), eltype(u)}(u, t, cache, extrapolate)
end
end
"""
Invert the integral of a LinearInterpolation object, which yields a
LinearInterpolationIntInv object.
"""
function invert_integral(A::LinearInterpolation)::LinearInterpolationIntInv
@assert all(A.u .>= 0) "Inverting the integral is only supported for non-negative LinearInterpolation."
t = DataInterpolations.integral.(Ref(A), A.t)
cache = LinearInterpolationIntInvCache(A.u, A.t)
return LinearInterpolationIntInv(A.t, t, cache, A.extrapolate)
end
function DataInterpolations._interpolate(
A::LinearInterpolationIntInv{<:AbstractVector},
V::Number,
iguess,
)
(; cache) = A
# smallest idx such that A.t[idx] >= V
# Note that A.t denotes integrated values
idx = searchsortedfirstcorrelated(A.t, V, iguess)
if idx == length(A.t) + 1
idx -= 1
end
if idx == 1
@assert V >= 0 "Cannot invert integral for negative input."
idx = 2
end
Vdiff = (V - A.t[idx - 1])
@assert Vdiff >= 0 "Vdiff must be non-negative, got V = $V, Vdiff = $Vdiff, idx = $idx"
t_prev = A.u[idx - 1]
idx = min(idx, length(A.u))
i = idx - 1
if cache.degenerate_slope[i]
# Special case when LinearInterpolation is (near) constant
t_prev + Vdiff / cache.u[idx]
else
t_prev +
(-cache.u[i] + sqrt(cache.u[i]^2 + 2 * cache.slope[i] * Vdiff)) / cache.slope[i]
end
end
"""
SmoothedLinearInterpolationIntInv(A::SmoothedLinearInterpolation)
Invert the integral of a SmoothedLinearInterpolation object, if possible. `A.u` must be non-negative.
## Arguments
- `A`: The SmoothedLinearInterpolation object whose integral is inverted.
"""
struct SmoothedLinearInterpolationIntInv{uType, tType, λType <: Real, T} <:
AbstractInterpolationIntInv{T}
u::uType
t::tType
cache::SmoothedLinearInterpolationCache{uType, tType, λType}
cache_integration::SmoothedLinearInterpolationIntInvCache{uType}
extrapolate::Bool
function SmoothedLinearInterpolationIntInv(u, t, cache, cache_int, λ, extrapolate)
return new{typeof(u), typeof(t), typeof(λ), eltype(u)}(
u,
t,
cache,
cache_int,
extrapolate,
)
end
end
"""
Invert the integral of a SmoothedLinearInterpolation object, which yields a
SmoothedLinearInterpolationIntInv object.
"""
function invert_integral(A::SmoothedLinearInterpolation)::SmoothedLinearInterpolationIntInv
@assert all(A.u .>= 0) "Inverting the integral is only supported for non-negative SmoothedLinearInterpolation."
(; cache, extrapolate) = A
t = DataInterpolations.integral.(Ref(A), cache.t_tilde)
u = cache.t_tilde
cache_int = SmoothedLinearInterpolationIntInvCache(A)
return SmoothedLinearInterpolationIntInv(u, t, cache, cache_int, cache.λ, extrapolate)
end
function DataInterpolations._interpolate(
A::SmoothedLinearInterpolationIntInv{<:AbstractVector},
V::Number,
iguess,
)
n_points = length(A.t)
(; u, t, cache, cache_integration) = A
(; degree, c4, c3, c2, c1, p, q, degenerate_Δu) = cache_integration
# smallest idx such that A.t[idx] >= V
# Note that A.t denotes integrated values
idx = searchsortedfirstcorrelated(A.t, V, iguess)
if idx == 1
@assert V >= 0 "Cannot invert integral for negative input."
idx = 2
end
if idx == 2
# First half spline section
# which is linear
Vdiff = (V - t[1])
@assert Vdiff >= 0
if isapprox(cache.linear_slope[1], 0; atol = 1e-5)
u[1] + Vdiff / cache.u[1]
else
u[1] +
(-cache.u[1] + sqrt(cache.u[1]^2 + 2 * cache.linear_slope[1] * Vdiff)) /
cache.linear_slope[1]
end
elseif idx == n_points + 1
# Extrapolation
Vdiff = (V - t[end])
@assert Vdiff >= 0
if isapprox(cache.linear_slope[end], 0; atol = 1e-5)
u[end] + Vdiff / cache.u[end]
else
u[end] +
(-cache.u[end] + sqrt(cache.u[end]^2 + 2 * cache.linear_slope[end] * Vdiff)) /
cache.linear_slope[end]
end
elseif idx % 2 == 0
Vdiff = (V - A.t[idx - 1])
@assert Vdiff >= 0
i = idx ÷ 2
c4ᵢ = c4[i]
c3ᵢ = c3[i]
c2ᵢ = c2[i]
c1ᵢ = c1[i]
c0 = -Vdiff
pᵢ = p[i]
qᵢ = q[i]
degᵢ = degree[i]
# Check the 4 possible roots for being valid;
# real and in [0,1]
root_iterator = iterate_roots(degᵢ, c4ᵢ, c3ᵢ, c2ᵢ, c1ᵢ, c0, pᵢ, qᵢ)
for s in root_iterator
if valid(s)
return T_s(A, real(s), i)
end
end
error("No valid root found, got $(collect(root_iterator)) for V = $V.")
else
# Linear section of SmoothedLinearInterpolation
Vdiff = (V - A.t[idx - 1])
@assert Vdiff >= 0
i = (idx - 1) ÷ 2
if degenerate_Δu[i + 1]
# Special case when SmoothedLinearInterpolation is (near) constant
A.u[idx - 1] + Vdiff / cache.u[i]
else
Δuᵢ₊₁ = cache.Δu[i + 1]
Δtᵢ₊₁ = cache.Δt[i + 1]
u_frac = cache.u[i] / Δuᵢ₊₁
λ = cache.λ
root = sqrt(u_frac^2 + λ * (u_frac + λ / 4) + 2 * Vdiff / (Δtᵢ₊₁ * Δuᵢ₊₁))
cache.t[i] + (-u_frac + sign(u_frac) * root) * Δtᵢ₊₁
end
end
end
===== src/smoothed_linear_interpolation.jl =====
"""
SmoothedLinearInterpolation(u, t; λ = 0.25, extrapolate = false)
The method of interpolating between the data points using a linear polynomial, with an interval around the
corner points being replaced by a smooth spline section.
## Arguments
- `u`: data points.
- `t`: time points.
## Keyword Arguments
- `extrapolate`: boolean value to allow extrapolation. Defaults to `false`.
- `λ`: The relative size of the spline interval. The interval extends a fraction `λ/2` towards
the neighbouring time points.
"""
struct SmoothedLinearInterpolation{uType, tType, λType <: Real, T} <:
AbstractInterpolation{T}
u::uType
t::tType
cache::SmoothedLinearInterpolationCache{uType, tType, λType}
linear_itp::LinearInterpolation{uType, tType, T}
extrapolate::Bool
function SmoothedLinearInterpolation(u, t, cache, λ, linear_itp, extrapolate)
return new{typeof(u), typeof(t), typeof(λ), eltype(u)}(
u,
t,
cache,
linear_itp,
extrapolate,
)
end
end
function SmoothedLinearInterpolation(
u,
t;
λ = 0.25,
extrapolate::Bool = false,
)::SmoothedLinearInterpolation
u, t = munge_data(u, t)
# Make sure the parameter λ is in the right range
@assert 0 <= λ <= 1 "The parameter λ must be in the interval [0,1], got $λ."
cache = SmoothedLinearInterpolationCache(u, t, λ)
linear_itp = LinearInterpolation(u, t; extrapolate)
return SmoothedLinearInterpolation(u, t, cache, λ, linear_itp, extrapolate)
end
function DataInterpolations._interpolate(
A::SmoothedLinearInterpolation{<:AbstractVector},
t::Number,
iguess,
)
(; u, t_tilde) = A.cache
# smallest idx such that A.t[idx] >= t
idx = searchsortedfirstcorrelated(A.t, t, iguess)
if idx == 1 || idx == length(u) + 1
# Linear extrapolation
DataInterpolations._interpolate(A.linear_itp, t, idx)[1]
else
# Interpolation
if t < t_tilde[2 * idx - 2]
U(A, t, idx - 1)
elseif t > t_tilde[2 * idx - 1]
U(A, t, idx)
else
# Linear interpolation
DataInterpolations._interpolate(A.linear_itp, t, idx)[1]
end
end
end
===== src/utils.jl =====
"""
S(A, t, idx)
Compute the spline parameter `s` from the time `t`.
## Arguments
- `A`: The `SmoothedLinearInterpolation` object
- `t`: The time point
- `idx`: The index indicating which spline section
"""
function S(A::SmoothedLinearInterpolation, t, idx)
(; Δt, ΔΔt, degenerate_ΔΔt, t_tilde, λ) = A.cache
Δtᵢ = Δt[idx]
ΔΔtᵢ = ΔΔt[idx]
tdiff = t - t_tilde[2 * idx - 1]
@assert tdiff >= 0
if degenerate_ΔΔt[idx]
# Degenerate case Δtᵢ₊₁ ≈ Δtᵢ
s = 1 / λ * tdiff / Δtᵢ
else
s = (-Δtᵢ + sqrt(Δtᵢ^2 + 2 * ΔΔtᵢ * tdiff / λ)) / ΔΔtᵢ
end
ε = 1e-5
@assert -ε <= s <= 1 + ε "s should be in [0,1], got $s."
return s
end
"""
S_deriv(A, t, idx)
Compute the derivative of the spline parameter `s` at the time `t`.
## Arguments
- `A`: The `SmoothedLinearInterpolation` object
- `t`: The time point
- `idx`: The index indicating which spline section
"""
function S_deriv(A::SmoothedLinearInterpolation, t, idx)
(; Δt, ΔΔt, degenerate_ΔΔt, t_tilde, λ) = A.cache
Δtᵢ = Δt[idx]
ΔΔtᵢ = ΔΔt[idx]
tdiff = t - t_tilde[2 * idx - 1]
@assert tdiff >= 0
if degenerate_ΔΔt[idx]
# Degenerate case Δtᵢ₊₁ ≈ Δtᵢ
s_deriv = 1 / (λ * Δtᵢ)
else
s_deriv = 1 / (λ * sqrt(Δtᵢ^2 + 2 * ΔΔtᵢ * tdiff / λ))
end
return s_deriv
end
"""
U(A, t, idx)
Compute the spline value `u` at the time `t`.
## Arguments
- `A`: The `SmoothedLinearInterpolation` object
- `t`: The time point
- `idx`: The index indicating which spline section
"""
function U(A::SmoothedLinearInterpolation, t, idx)
s = S(A, t, idx)
return U_s(A, s, idx)
end
function U_deriv(A::SmoothedLinearInterpolation, t, idx)
s = S(A, t, idx)
s_deriv = S_deriv(A, t, idx)
return U_s_deriv(A, s, idx) * s_deriv
end
"""
U_s(A, s, idx)
Compute the spline value `u` from the spline parameter `s`.
## Arguments
- `A`: The `SmoothedLinearInterpolation` object
- `s`: The spline parameter value
- `idx`: The index indicating which spline section
"""
function U_s(A::AbstractInterpolation, s, idx)
(; Δu, ΔΔu, u_tilde, λ) = A.cache
Δuᵢ = Δu[idx]
ΔΔuᵢ = ΔΔu[idx]
return λ / 2 * ΔΔuᵢ * s^2 + λ * Δuᵢ * s + u_tilde[2 * idx - 1]
end
"""
U_s_deriv(A, s, idx)
Compute the derivative of the spline value `u` at the spline parameter value `s`.
## Arguments
- `A`: The `SmoothedLinearInterpolation` object
- `s`: The spline parameter value
- `idx`: The index indicating which spline section
"""
function U_s_deriv(A::AbstractInterpolation, s, idx)
(; Δu, ΔΔu, λ) = A.cache
Δuᵢ = Δu[idx]
ΔΔuᵢ = ΔΔu[idx]
return λ * ΔΔuᵢ * s + λ * Δuᵢ
end
"""
Compute the coefficients for the quartic polynomial in s for the integration
of a spline section
Vdiff = c4 * s^4 + c3 * s^3 + c2 * s^2 + c1 * s + c0
"""
function get_quartic_coefficients(A::SmoothedLinearInterpolation, idx::Number)
(; Δu, Δt, ΔΔu, ΔΔt, u_tilde, λ) = A.cache
i = 2 * idx
Δtᵢ = Δt[idx]
Δuᵢ = Δu[idx]
ΔΔuᵢ = ΔΔu[idx]
ΔΔtᵢ = ΔΔt[idx]
f₁ = u_tilde[i - 1] / λ
f₂ = λ^2 / 24
c4 = f₂ * (3 * ΔΔtᵢ * ΔΔuᵢ)
c3 = f₂ * (4 * Δtᵢ * ΔΔuᵢ + 8 * ΔΔtᵢ * Δuᵢ)
c2 = f₂ * (12 * ΔΔtᵢ * f₁ + 12 * Δtᵢ * Δuᵢ)
c1 = f₂ * (24 * Δtᵢ * f₁)
return c4, c3, c2, c1
end
"""
Determine whether a value s is valid, i.e.
- Its imaginary part is close to 0;
- Its real part is in the interval [0,1].
"""
valid(s) = isapprox(imag(s), 0; atol = 1e-4) && (0 <= real(s) <= 1)
"""
T_s(A, s, idx)
Compute the time `t` from the spline parameter `s`.
## Arguments
- `A`: The `SmoothedLinearInterpolation` object
- `s`: The spline parameter value
- `idx`: The index indicating which spline section
"""
function T_s(A::SmoothedLinearInterpolationIntInv, s, idx)
(; Δt, ΔΔt, t_tilde, λ) = A.cache
Δtᵢ = Δt[idx]
ΔΔtᵢ = ΔΔt[idx]
return λ / 2 * ΔΔtᵢ * s^2 + λ * Δtᵢ * s + t_tilde[2 * idx - 1]
end
struct RootIterator{T1, T2, T3, T4, D}
# T1: Real constants
# T2: Real constants depending on c0
# T3: Complex constants depending on c0
degree::D
ab_part::T4
c2::T1
c3::T1
Δ₀::T2
Δ₁::T2
Q::T3
S::T3
p::T1
q::T1
end
"""
iterate_roots(degree, c4, c3, c2, c1, c0, p, q)
Generate an iterator object which iterates over the roots
of the polynomial with the given coefficients of the given degree.
Coefficients for terms higher than the degree are not used,
and p, q are only used when degree = 4.
"""
function iterate_roots(
degree,
c4::T1,
c3::T1,
c2::T1,
c1::T1,
c0::T2,
p::T1,
q::T1,
)::RootIterator where {T1, T2}
Δ₀ = zero(c0)
Δ₁ = zero(c0)
Q = zero(Complex(c0))
S = zero(Complex(c0))
if degree == 1
ab_part = -c0 / c1
elseif degree == 2
Δ₀ = c1^2 - 4 * c2 * c0
ab_part = -c1 / (2 * c2)
else
Δ₀ = c2^2 - 3 * c3 * c1 + 12 * c4 * c0
Δ₁ =
2 * c2^3 - 9 * c3 * c2 * c1 + 27 * c3^2 * c0 + 27 * c4 * c1^2 -
72 * c4 * c2 * c0
Q = ∛((Δ₁ + sqrt(Complex(Δ₁^2 - 4 * Δ₀^3))) / 2)
if degree == 3
ab_part = -c2 / (3 * c3)
else
ab_part = -c3 / (4 * c4)
S = sqrt(-2 * p / 3 + (Q + Δ₀ / Q) / (3 * c4)) / 2
end
end
return RootIterator(degree, ab_part, c2, c3, Δ₀, Δ₁, Q, S, p, q)
end
function Base.cbrt(z::Complex)
ϕ = angle(z) / 3
r = abs(z)
return ∛(r) * Complex(cos(ϕ), sin(ϕ))
end
"""
Compute a root of a quartic polynomial
"""
function quartic_root(root_iterator::RootIterator{T1, T2, T3}, state)::T3 where {T1, T2, T3}
(; ab_part, S, p, q) = root_iterator
# Order the roots in order of likelihood of being the right one (empirically)
sign_1 = state % 3 == 1 ? -1 : 1
sign_2 = state < 3 ? 1 : -1
root = sqrt(-4S^2 - 2p - sign_1 * q / S)
out = ab_part + sign_1 * S + sign_2 * 0.5 * root
return out
end
"""
Compute a root of a cubic polynomial
"""
function cube_root(root_iterator::RootIterator{T1, T2, T3}, state)::T3 where {T1, T2, T3}
(; c3, Q, Δ₀, ab_part) = root_iterator
ξ = exp(2π * im / 3)
C = Q * ξ^(state - 1)
return ab_part - (C + Δ₀ / C) / (3 * c3)
end
"""
Compute a root of a quadratic polynomial
"""
function square_root(root_iterator::RootIterator{T1, T2, T3}, state)::T3 where {T1, T2, T3}
(; c2, ab_part, Δ₀) = root_iterator
return ab_part + (-1)^state * sqrt(Δ₀) / (2 * c2)
end
"""
Compute a root of a linear polynomial
"""
function linear_root(root_iterator::RootIterator{T1, T2, T3}, state)::T3 where {T1, T2, T3}
return root_iterator.ab_part
end
function root(root_iterator::RootIterator{T1, T2, T3}, state)::T3 where {T1, T2, T3}
(; degree) = root_iterator
if degree == 4
quartic_root(root_iterator, state)
elseif degree == 3
cube_root(root_iterator, state)
elseif degree == 2
square_root(root_iterator, state)
else
linear_root(root_iterator, state)
end
end
Base.length(root_iterator::RootIterator) = root_iterator.degree
Base.iterate(root_iterator::RootIterator) = (root(root_iterator, 1), 2)
Base.iterate(root_iterator::RootIterator, state) =
state > root_iterator.degree ? nothing : (root(root_iterator, state), state + 1)
function p_coeff(c4::T2, c3::T2, c2::T2)::T2 where {T2}
return (8 * c4 * c2 - 3 * c3^2) / (8 * c4^2)
end
function q_coeff(c4::T2, c3::T2, c2::T2, c1::T2)::T2 where {T2}
return (c3^3 - 4 * c4 * c3 * c2 + 8 * c4^2 * c1) / (8 * c4^3)
end
"""
Compute u_tilde, the value of u at the boundary points between linear and spline sections
of a SmoothedLinearInterpolation curve.
"""
function get_spline_ends(u, Δu, λ)
u_tilde = zeros(2 * length(u))
u_tilde[1] = u[1]
u_tilde[2:2:(end - 1)] = u[1:(end - 1)] .+ (λ / 2) .* Δu[2:(end - 1)]
u_tilde[3:2:end] = u[2:end] .- (λ / 2) .* Δu[2:(end - 1)]
u_tilde[end] = u[end]
return u_tilde
end
"""
LinearInterpolation(A::SmoothedLinearInterpolation; n_samples = 10)
Convert a SmoothedLinearInterpolation object into a LinearInterpolation object
by sampling the spline sections. The main usage of this is that a LinearInterpolation
and especially its integration inverse are much cheaper to evaluate than the
original smoothed equivalents.
## Arguments
- `A`: The SmoothedLinearInterpolation object
## Keyword Arguments
- `n_samples`: The number of samples per spline section
"""
function DataInterpolations.LinearInterpolation(
A::SmoothedLinearInterpolation;
n_samples = 10,
)::LinearInterpolation
t = zeros(2 + (length(A.t) - 2) * n_samples)
for i in eachindex(A.t)
if i == 1
t[1] = A.t[1]
elseif i == length(A.t)
t[end] = A.t[end]
else
t_tildeⱼ = A.cache.t_tilde[2 * i - 1]
t_tildeⱼ₊₁ = A.cache.t_tilde[2 * i]
t[(2 + (i - 2) * n_samples):(1 + (i - 1) * n_samples)] =
range(t_tildeⱼ, t_tildeⱼ₊₁; length = n_samples)
end
end
u = A.(t)
return LinearInterpolation(u, t; A.extrapolate)
end
function Base.show(io::IO, cache::AbstractCache{uType}) where {uType}
println(io, typeof(cache))
println(
io,
"Note: t, u stand for the inputs and outputs respectively of the original interpolation, not the inversion.",
)
data = Dict{Int, Vector{AbstractVector}}()
header = Dict{Int, Vector{Symbol}}()
for propertyname in propertynames(cache)
property = getfield(cache, propertyname)
if property isa AbstractVector
L = length(property)
if L ∉ keys(data)
data[L] = AbstractVector[]
header[L] = Symbol[]
end
push!(header[L], propertyname)
push!(data[L], property)
end
end
for L in keys(data)
data_L = hcat(data[L]...)
header_L = header[L]
pretty_table(io, data_L; header = header_L, vcrop_mode = :middle)
end
end
function forward_itp(A::LinearInterpolationIntInv)
return LinearInterpolation(A.cache.u, A.u; A.extrapolate)
end
function forward_itp(A::SmoothedLinearInterpolationIntInv)
linear_itp = DataInterpolations.LinearInterpolation(A.cache.u, A.cache.t)
return SmoothedLinearInterpolation(
A.cache.u,
A.cache.t,
A.cache,
A.cache.λ,
linear_itp,
A.extrapolate,
)
end
===== test/continuity_test.jl =====
using SmoothInterpolation
using Random
using ForwardDiff
@testset "SmoothedLinearInterpolation degenerate" begin
Random.seed!(1)
ε = 1e-5
u = cumsum(rand(5))
t = [1.0, 2.0, 3.0, 4.0, 5.0]
itp = SmoothedLinearInterpolation(u, t; extrapolate = true)
u₋ = @. itp(itp.cache.t_tilde - ε)
u₊ = @. itp(itp.cache.t_tilde + ε)
@test u₋ ≈ u₊ atol = 1e-4
end
@testset "SmoothedLinearInterpolation non-degenerate" begin
Random.seed!(1)
ε = 1e-5
u = cumsum(rand(5))
t = cumsum(rand(5) .+ (1:5))
itp = SmoothedLinearInterpolation(u, t; extrapolate = true)
u₋ = @. itp(itp.cache.t_tilde - ε)
u₊ = @. itp(itp.cache.t_tilde + ε)
@test u₋ ≈ u₊ atol = 1e-4
end
@testset "SmoothedLinearInterpolation degenerate derivative" begin
Random.seed!(1)
ε = 1e-5
u = cumsum(rand(5))
t = [1.0, 2.0, 3.0, 4.0, 5.0]
itp = SmoothedLinearInterpolation(u, t; extrapolate = true)
du₋ = ForwardDiff.derivative.(Ref(itp), itp.cache.t_tilde .- ε)
du₊ = ForwardDiff.derivative.(Ref(itp), itp.cache.t_tilde .+ ε)
@test du₋ ≈ du₊ atol = 1e-4
end
@testset "SmoothedLinearInterpolation non-degenerate derivative" begin
Random.seed!(1)
ε = 1e-5
u = cumsum(rand(5))
t = cumsum(rand(5) .+ (1:5))
itp = SmoothedLinearInterpolation(u, t; extrapolate = true)
du₋ = ForwardDiff.derivative.(Ref(itp), itp.cache.t_tilde .- ε)
du₊ = ForwardDiff.derivative.(Ref(itp), itp.cache.t_tilde .+ ε)
@test du₋ ≈ du₊ atol = 1e-4
end

===== test/derivatives_test.jl =====
using Random
using DataInterpolations
using SmoothInterpolation
using ForwardDiff
@testset "SmoothedLinearInterpolation" begin
Random.seed!(10)
t = cumsum(rand(10))
u = rand(10)
t_eval = (t[1] - 1):0.01:(t[end] + 1)
itp = SmoothedLinearInterpolation(u, t; extrapolate = true)
u_deriv_eval = DataInterpolations.derivative.(Ref(itp), t_eval)
u_deriv_check = ForwardDiff.derivative.(Ref(itp), t_eval)
@test u_deriv_eval ≈ u_deriv_check
end
@testset "LinearInterpolationIntInv" begin
Random.seed!(10)
t = cumsum(rand(10))
u = rand(10)
itp = LinearInterpolation(u, t; extrapolate = true)
itp_int_inv = invert_integral(itp)
u_int_eval = itp_int_inv.t[1]:0.01:(itp_int_inv.t[end] + 1)
t_deriv_eval = DataInterpolations.derivative.(Ref(itp_int_inv), u_int_eval)
t_deriv_check = ForwardDiff.derivative.(Ref(itp_int_inv), u_int_eval)
@test t_deriv_eval ≈ t_deriv_check
end
@testset "SmoothedLinearInterpolationIntInv" begin
Random.seed!(10)
t = cumsum(rand(10))
u = rand(10)
itp = SmoothedLinearInterpolation(u, t; extrapolate = true)
itp_int_inv = invert_integral(itp)
u_int_eval = itp_int_inv.t[1]:0.01:(itp_int_inv.t[end] + 1)
t_deriv_eval = DataInterpolations.derivative.(Ref(itp_int_inv), u_int_eval)
t_deriv_check = ForwardDiff.derivative.(Ref(itp_int_inv), u_int_eval)
@test t_deriv_eval ≈ t_deriv_check
end
===== test/extrapolation_test.jl =====
using SmoothInterpolation
@testset "SmoothedLinearInterpolation" begin
u = [1.0, 2.0, 3.0, 4.0]
t = [1.0, 2.0, 3.0, 4.0]
itp = SmoothedLinearInterpolation(u, t; extrapolate = true)
@test itp(0.0) ≈ 0.0
@test itp(5.0) ≈ 5.0
u = zeros(5)
t = [1.0, 2.0, 3.0, 4.0, 5.0]
itp = SmoothedLinearInterpolation(u, t; extrapolate = true)
@test itp(0.0) ≈ 0.0
@test itp(5.0) ≈ 0.0
end

===== test/integration_test.jl =====
using DataInterpolations
using SmoothInterpolation
using Random
using ForwardDiff
@testset "SmoothedLinearInterpolation integration outcome" begin
Random.seed!(1)
u = rand(5)
t = cumsum(rand(5))
itp = SmoothedLinearInterpolation(u, t; extrapolate = true)
# With extrapolation
t_eval = t[1]:0.1:(t[end] + 1)
u_int = DataInterpolations.integral.(Ref(itp), t_eval)
# Numerical integration
u_eval = itp.(t_eval)
u_int_num = 0.5 * 0.1 * (u_eval[2:end] + u_eval[1:(end - 1)])
u_int_num = cumsum(u_int_num)
pushfirst!(u_int_num, 0.0)
@test u_int ≈ u_int_num rtol = 1e-3
end
@testset "SmoothedLinearInterpolation integration derivative" begin
Random.seed!(1)
t = cumsum(rand(5))
u = rand(5)
t = [1.0, 2.0, 3.0, 4.0, 5.0]
itp = SmoothedLinearInterpolation(u, t; extrapolate = true)
# With extrapolation
t_eval = t[1]:0.1:(t[end] + 1)
u_eval = itp.(t_eval)
integral = t -> DataInterpolations.integral(itp, t)
u_eval_ = ForwardDiff.derivative.(Ref(integral), t_eval)
# Automatic derivative at t = itp.t[1] fails (#25)
@test u_eval[2:end] ≈ u_eval_[2:end]
end

===== test/interpolation_test.jl =====
using DataInterpolations
using SmoothInterpolation
using Random
@testset "SmoothedLinearInterpolation degenerate" begin
Random.seed!(1)
u = cumsum(rand(5))
t = [1.0, 2.0, 3.0, 4.0, 5.0]
itp = SmoothedLinearInterpolation(u, t)
@test all(itp.cache.ΔΔt .≈ 0)
@test itp.(1.5:0.3:5.0) ≈ [
0.24799,
0.35276,
0.49293,
0.70214,
0.91179,
1.11923,
1.30991,
1.49839,
1.68723,
1.93269,
2.20716,
2.48164,
] atol = 1e-4
@test_nowarn string(itp.cache)
end
@testset "SmoothedLinearInterpolation non-degenerate" begin
Random.seed!(2)
u = cumsum(rand(5))
t = cumsum(rand(5) .+ (1:5))
itp = SmoothedLinearInterpolation(u, t)
@test !any(itp.cache.ΔΔt[2:(end - 1)] .≈ 0)
@test itp.(t[1]:1.2:t[end]) ≈ [
0.00225,
0.30983,
0.61718,
0.88465,
1.14094,
1.39383,
1.60492,
1.80825,
2.01157,
2.19797,
2.27307,
2.32361,
2.37415,
2.42469,
] atol = 1e-4
@test_nowarn string(itp)
end
@testset "LinearInterpolationIntInv" begin
Random.seed!(9)
u = rand(5)
# Add degenerate case of constant u
push!(u, u[end])
t = cumsum(rand(6))
itp = SmoothedLinearInterpolation(u, t; extrapolate = true)
itp = LinearInterpolation(itp)
itp_int_inv = invert_integral(itp)
t_eval = range(t[1], t[end]; length = 200)
u_int_eval = DataInterpolations.integral.(Ref(itp), t_eval)
@test t_eval ≈ itp_int_inv.(u_int_eval)
@test_nowarn string(itp_int_inv.cache)
end
@testset "SmoothedLinearInterpolationIntInv" begin
Random.seed!(9)
u = rand(5)
# Add degenerate case of constant u
push!(u, u[end])
t = cumsum(rand(6))
itp = SmoothedLinearInterpolation(u, t)
itp_int_inv = invert_integral(itp)
t_eval = range(t[1], t[end]; length = 200)
u_int_eval = DataInterpolations.integral.(Ref(itp), t_eval)
@test t_eval ≈ itp_int_inv.(u_int_eval)
@test_nowarn string(itp_int_inv.cache_integration)
end
===== test/runtests.jl =====
using SafeTestsets
@safetestset "Utils" include("utils_test.jl")
@safetestset "Interpolation" include("interpolation_test.jl")
@safetestset "Extrapolation" include("extrapolation_test.jl")
@safetestset "Continuity" include("continuity_test.jl")
@safetestset "Integration" include("integration_test.jl")
@safetestset "Derivatives" include("derivatives_test.jl")

===== test/utils_test.jl =====
using SmoothInterpolation: iterate_roots, p_coeff, q_coeff
@testset "Polynomial solving" begin
p = 0.0
q = 0.0
# Degree 1
c4 = 0.0
c3 = 0.0
c2 = 0.0
c1 = 2.0
c0 = 4.0
roots = collect(iterate_roots(1, c4, c3, c2, c1, c0, p, q))
@test length(roots) == 1
@test only(roots) ≈ -2.0
# Degree 2
c4 = 0.0
c3 = 0.0
c2 = 1.0
c1 = -1.0
c0 = -1.0
roots = collect(iterate_roots(2, c4, c3, c2, c1, c0, p, q))
@test length(roots) == 2
ϕ = (sqrt(5) + 1) / 2
@test roots ≈ [1 - ϕ, ϕ]
# Degree 3
c4 = 0.0
c3 = 3.0
c2 = -63.0
c1 = 429.0
c0 = -945.0
roots = collect(iterate_roots(3, c4, c3, c2, c1, c0, p, q))
@test roots ≈ Float64[5, 9, 7]
# Degree 4
c4 = 1.0
c3 = -10.0
c2 = 35.0
c1 = -50.0
c0 = 24.0
p = p_coeff(c4, c3, c2)
q = q_coeff(c4, c3, c2, c1)
roots = collect(iterate_roots(4, c4, c3, c2, c1, c0, p, q))
@test roots ≈ Float64[2, 4, 3, 1]
end

===== docs: README.md =====
[codecov](https://codecov.io/gh/SouthEndmusic/SmoothInterpolation.jl)
<img src="docs/src/assets/logo.svg" width="200">
# SmoothInterpolation.jl
`SmoothInterpolation.jl` exports 3 interpolation types in the style of [DataInterpolations.jl](https://github.com/SciML/DataInterpolations.jl):
- `SmoothedLinearInterpolation`, a type of linear interpolation with well-behaved smoothed corners;
- `SmoothedLinearInterpolationIntInv`, the inverse of the antiderivative of a `SmoothedLinearInterpolation` if it exists;
- `LinearInterpolationIntInv`, the inverse of the antiderivative of a `LinearInterpolation` if it exists.
## Installation
Currently, the package can only be installed as below.
```
pkg> dev https://github.com/SouthEndMusic/SmoothInterpolation.jl
```
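## Quick example

A minimal usage sketch with made-up data (see the documentation for full examples):

```julia
using SmoothInterpolation

u = [1.0, 3.0, 2.0, 4.0]
t = [0.0, 1.0, 2.0, 3.0]

itp = SmoothedLinearInterpolation(u, t; λ = 0.25, extrapolate = true)
itp(1.5)                            # evaluate between data points

itp_int_inv = invert_integral(itp)  # requires non-negative u
itp_int_inv(2.0)                    # t at which the integral of itp from t[1] equals 2.0
```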
## Supported features
Not all features for interpolation objects from `DataInterpolations.jl` are currently supported.
| | Evaluation | Derivative | Integration |
| --- | --- | --- | --- |
| `SmoothedLinearInterpolation` | Supported | Supported | Supported |
| `SmoothedLinearInterpolationIntInv` | Supported | Supported | Not supported |
| `LinearInterpolationIntInv` | Supported | Supported | Not supported |
If you wish to use one of the currently unsupported features, please [write an issue](https://github.com/SouthEndMusic/SmoothInterpolation.jl/issues). Note that differentiation can also be achieved with many of the [automatic differentiation packages](https://juliadiff.org/#the_big_list) in the Julia ecosystem.
## Logo
The logo is inspired by the [julia logo graphics](https://github.com/JuliaLang/julia-logo-graphics).
===== docs: API =====
# API
Documentation for `SmoothInterpolation.jl`'s public interface.
```@docs
SmoothedLinearInterpolation
LinearInterpolation(::SmoothedLinearInterpolation)
invert_integral(::LinearInterpolation)
invert_integral(::SmoothedLinearInterpolation)
```

===== docs: To Cache or not to Cache =====
# To Cache or not to Cache
At the initialization of the interpolation objects exposed by this package, a lot of data is precomputed and cached, see for instance the example below.
```@example 1
import Random # hide
Random.seed!(2) # hide
using SmoothInterpolation
u = rand(10)
t = cumsum(rand(10))
itp = SmoothedLinearInterpolation(u, t)
itp.cache
```
This means that evaluation of the interpolation is faster, at the cost of more memory allocation at initialization. This is in contrast to the interpolation in `DataInterpolations.jl`, where very little to no memory is allocated at the initialization of interpolation objects. What is better depends on the application.
If you want to use the interpolation objects exposed by this package without pre-allocation, please let me know in [this issue](https://github.com/SouthEndMusic/SmoothInterpolation.jl/issues/45).

===== docs: Examples =====
# Examples
## Smoothed linear interpolation
```@example 1
import Random # hide
Random.seed!(2) # hide
using SmoothInterpolation
u = rand(10)
t = cumsum(rand(10))
itp = SmoothedLinearInterpolation(u, t; extrapolate = true)
```
```@example 1
using Plots
scatter(itp.t, itp.u, label = "Input")
plot!(itp, label = "Smoothed Linear Interpolation")
```
## Inverting the integral
```@example 1
itp_int_inv = invert_integral(itp)
V = 1.0
t_V = itp_int_inv(V)
t_eval_V = range(t[1], t_V, length = 100)
plot!(t_eval_V, itp.(t_eval_V), fill = (:blue, 0, 0.5), label = "area = $V")
```
!!! tip
    The integral inverse of `SmoothedLinearInterpolation` is expensive to compute as it involves solving a quartic equation. If performance is important to your application, consider converting your `SmoothedLinearInterpolation` object into a `LinearInterpolation` object using `LinearInterpolation(A::SmoothedLinearInterpolation; n_samples = 10)`, which samples the spline sections. The inverse of this is much cheaper. A sketch of this conversion is shown below.
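A sketch of this conversion (plain Julia; `itp`, `itp_int_inv` and `V` are the objects defined above):

```julia
# Sample each spline section at 10 points and rebuild a plain LinearInterpolation
itp_sampled = LinearInterpolation(itp; n_samples = 10)

# Inverting the integral of the sampled version only involves quadratic equations
itp_sampled_int_inv = invert_integral(itp_sampled)
itp_sampled_int_inv(V)  # ≈ t_V from above, at a lower evaluation cost
```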
## The effect of the parameter λ
```@example 1
using ColorSchemes
t = [0, 1, 2, 2.5, 3, 3.5, 4]
u = Float64[-1, 1, -1, 0, 1, 0, -1]
pl = plot()
scatter!(t, u, label = "Input", legend = :top)
N = 101
Λ = range(0, 1, length = N)
colors = cgrad(:jet, range(0, 1, length = N))
for (i, (λ, color)) in enumerate(zip(Λ, colors))
itp = SmoothedLinearInterpolation(u, t; λ)
label = i % 10 == 1 ? "λ = $λ" : nothing
plot!(itp; label, color)
end
pl
```
## Derivatives
Derivatives can be calculated using `DataInterpolations.derivative(itp, t)`. There is a quite simple relationship between the derivative of the inverse of the integral of a function and the function itself:
```math
(F^{-1})'(V) = \frac{1}{f(F^{-1}(V))}.
```
See also the code example below.
```@example 1
using DataInterpolations
using ForwardDiff
Random.seed!(15) # hide
t = cumsum(rand(10))
u = rand(10)
itp = SmoothedLinearInterpolation(u, t; extrapolate = true)
itp_int_inv = invert_integral(itp)
u_int_eval = itp_int_inv.t[1]:0.01:(itp_int_inv.t[end] + 1)
# Compute the hardcoded SmoothedLinearInterpolationIntInv derivative
t_deriv_eval = DataInterpolations.derivative.(Ref(itp_int_inv), u_int_eval)
# Compute the SmoothedLinearInterpolationIntInv derivative using ForwardDiff
t_deriv_forward_diff = ForwardDiff.derivative.(Ref(itp_int_inv), u_int_eval)
# Compare results
@show t_deriv_eval ≈ 1 ./ itp.(itp_int_inv.(u_int_eval))
@show t_deriv_eval ≈ t_deriv_forward_diff
```

===== docs: index.md (home page) =====
# SmoothInterpolation.jl
`SmoothInterpolation.jl` exports 3 interpolation types in the style of [DataInterpolations.jl](https://github.com/SciML/DataInterpolations.jl):
- `SmoothedLinearInterpolation`, a type of linear interpolation with well-behaved smoothed corners;
- `SmoothedLinearInterpolationIntInv`, the inverse of the antiderivative of a `SmoothedLinearInterpolation` if it exists;
- `LinearInterpolationIntInv`, the inverse of the antiderivative of a `LinearInterpolation` if it exists.
## Installation
Currently, the package can only be installed as below.
```
pkg> dev https://github.com/SouthEndMusic/SmoothInterpolation.jl
```
## Supported features
Not all features for interpolation objects from `DataInterpolations.jl` are currently supported.
| | Evaluation | Derivative | Integration |
| --- | --- | --- | --- |
| `SmoothedLinearInterpolation` | Supported | Supported | Supported |
| `SmoothedLinearInterpolationIntInv` | Supported | Supported | Not supported |
| `LinearInterpolationIntInv` | Supported | Supported | Not supported |
If you wish to use one of the currently unsupported features, please [write an issue](https://github.com/SouthEndMusic/SmoothInterpolation.jl/issues). Note that differentiation can also be achieved with many of the [automatic differentiation packages](https://juliadiff.org/#the_big_list) in the Julia ecosystem.
## Logo
The logo is inspired by the [julia logo graphics](https://github.com/JuliaLang/julia-logo-graphics).

===== docs: Construction of smoothed linear interpolation =====
# Construction of smoothed linear interpolation
## Linear interpolation
Given is the set of points $(\mathbf{p}_i)_{i=1}^n$ in $\mathbb{R}^2$, where we write $t$ and $u$ for the respective coordinates. We assume that the $t_i$ are strictly increasing. Linear interpolation of these points is then simply given by
```math
\begin{equation}
u|_{[t_{i-1}, t_i]}(t) = \frac{u_i-u_{i-1}}{t_i-t_{i-1}}(t-t_{i-1}).
\end{equation}
```
Note that there is a discontinuity in the derivative of the function $u$ at each $t_i$ for $i = 2, \ldots, n-1$.
## Smoothed spline corners
To get rid of the discontinuities mentioned in the previous section, we take out a section of the interpolation around each discontinuity and replace it with a spline curve.
### New points
For the construction of the smoothing we consider the consecutive points
```math
\mathbf{p}_{i-1}, \mathbf{p}_{i}, \mathbf{p}_{i+1}.
```
Now we disregard $\mathbf{p}_i$, and introduce 2 new points
```math
\begin{equation}
\begin{aligned}
\mathbf{p}_{i-\frac{\lambda}{2}} =&\; \mathbf{p}_i - \frac{\lambda}{2}\Delta\mathbf{p}_i \\
\mathbf{p}_{i+\frac{\lambda}{2}} =&\; \mathbf{p}_i + \frac{\lambda}{2}\Delta\mathbf{p}_{i+1}
\end{aligned}
\end{equation}
```
where $\Delta\mathbf{p}_i = \mathbf{p}_i - \mathbf{p}_{i-1}$ and $\lambda \in [0,1]$. We will connect these points with a spline curve, and so $\lambda$ determines the size of the interval around $\mathbf{p}_i$ that is replaced by the spline curve.
### Deriving the spline curve
We want to connect $\mathbf{p}_{i-\frac{\lambda}{2}}$ and $\mathbf{p}_{i+\frac{\lambda}{2}}$ with a smooth parametric curve
```math
\begin{equation}
\mathbf{C}_i : [0,1] \rightarrow \mathbb{R}^2
\end{equation}
```
such that:
- The connection can be expressed as
```math
\begin{equation}
u_i : \left[t_{i - \frac{\lambda}{2}}, t_{i + \frac{\lambda}{2}}\right] \rightarrow \mathbb{R},
\end{equation}
```
i.e. the $t$ component of the curve must be invertible.
- The connection is continuous, i.e.
```math
\begin{equation}
\mathbf{C}_i(0) = \mathbf{p}_{i-\frac{\lambda}{2}}, \quad \mathbf{C}_i(1) = \mathbf{p}_{i+\frac{\lambda}{2}}.
\end{equation}
```
- The derivative of the connection is continuous, i.e.
```math
\begin{equation}
\mathbf{C}'_i(0) \propto \Delta\mathbf{p}_i, \quad \mathbf{C}'_i(1) \propto \Delta\mathbf{p}_{i+1}.
\end{equation}
```
We can achieve this by repeated interpolation. The first interpolations are
```math
\begin{equation}
\begin{aligned}
\mathbf{C}_{i-\frac{\lambda}{2}}(s) = (1-s)\mathbf{p}_{i-\frac{\lambda}{2}} + s\mathbf{p}_i \\
\mathbf{C}_{i+\frac{\lambda}{2}}(s) = (1-s)\mathbf{p}_i + s\mathbf{p}_{i+\frac{\lambda}{2}}
\end{aligned}
\end{equation}
```
and combining these yields
```math
\begin{equation}
\begin{aligned}
\mathbf{C}_i(s) =&\; (1-s)\mathbf{C}_{i-\frac{\lambda}{2}}(s) + s\mathbf{C}_{i+\frac{\lambda}{2}}(s) \\
=&\; (1-s)^2\mathbf{p}_{i-\frac{\lambda}{2}} + 2s(1-s)\mathbf{p}_i + s^2\mathbf{p}_{i+\frac{\lambda}{2}} \\
=&\; \frac{\lambda}{2}(\Delta \mathbf{p}_{i+1} - \Delta \mathbf{p}_i)s^2 + \lambda \Delta \mathbf{p}_i s + \mathbf{p}_{i-\frac{\lambda}{2}}
\end{aligned}
\end{equation}
```
Note that the second formulation tells us that $\mathbf{C}_i$ is a convex combination of $\mathbf{p}_{i-\frac{\lambda}{2}}, \mathbf{p}_i, \mathbf{p}_{i + \frac{\lambda}{2}}$ for all $s \in [0,1]$, and thus always lies in the convex hull of these points.
### Writing spline curve as a function $u(t)$
To write the spline curve as a function $u(t)$, we first need to obtain $s$ from $t$:
```math
\begin{equation}
T_i(s) = \frac{1}{2}\lambda(\Delta t_{i+1} - \Delta t_i)s^2 + \lambda\Delta t_i s + t_{i-\frac{\lambda}{2}} = t.
\end{equation}
```
This yields
```math
\begin{equation}
S_i(t) = \frac{
-\lambda \Delta t_i + \sqrt{\lambda^2\Delta t_i^2 + 2\lambda (\Delta t_{i+1} - \Delta t_i)\left(t - t_{i-\frac{\lambda}{2}}\right)}
}{
\lambda (\Delta t_{i+1} - \Delta t_i)
},
\end{equation}
```
or, in the degenerate case that $\Delta t_{i+1} - \Delta t_i = 0$ (i.e. the 3 points are equally spaced),
```math
\begin{equation}
S_i(t) = \frac{1}{\lambda}\frac{t - t_{i - \frac{\lambda}{2}}}{\Delta t_i}.
\end{equation}
```
Note that $\Delta t_i \ne 0$ by the assumption that the $t_i$ are strictly increasing.
We conclude:
```math
\begin{equation}
u_i(t) = \frac{\lambda}{2}(\Delta u_{i+1} - \Delta u_i)S_i(t)^2 + \lambda \Delta u_i S_i(t) + u_{i - \frac{\lambda}{2}}.
\end{equation}
```
## Extrapolation
We define $\Delta \mathbf{p}_{1} = \Delta \mathbf{p}_{2}$ and $\Delta \mathbf{p}_{n+1} = \Delta \mathbf{p}_n$. This yields
```math
\begin{equation}
u_1(t) = \frac{\Delta u_2 }{\Delta t_2}(t - t_1) + u_1, \quad t \in \left[t_1, t_{1 + \frac{\lambda}{2}}\right]
\end{equation}
```
and
```math
\begin{equation}
u_n(t) = \frac{\Delta u_n}{\Delta t_n}(t - t_n) + u_n, \quad t \in \left[t_{n - \frac{\lambda}{2}}, t_n\right].
\end{equation}
```
This means that the interpolation is linear towards its boundaries and thus can be smoothly extended linearly.
## Evaluation
Once it is determined that the input $t$ is in the interval $[t_{i-1}, t_i]$, the interpolation is evaluated as follows:
```math
\begin{equation}
\begin{aligned}
u|_{[t_{i-1}, t_i]}(t) =
\begin{cases}
u_{i-1}(t) &\text{if }& t_{i-1} \leq t \leq t_{i - 1 + \frac{\lambda}{2}} \\
u_{i-1} + \frac{\Delta u_i}{\Delta t_i}(t - t_{i-1}) &\text{if }& t_{i - 1 + \frac{\lambda}{2}} \leq t \leq t_{i - \frac{\lambda}{2}} \\
u_i(t) &\text{if }& t_{i - \frac{\lambda}{2}} \leq t \leq t_i
\end{cases}
\end{aligned}
\end{equation}
```
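A minimal numerical sketch of this evaluation around a single interior corner (self-contained Julia, not the package implementation; only the non-degenerate case $\Delta t_{i+1} \neq \Delta t_i$ is handled):

```julia
# Sketch: evaluate the smoothed interpolation near one interior corner pᵢ.
# Inputs: neighbouring points (t⁻, u⁻), (tᵢ, uᵢ), (t⁺, u⁺) and smoothing parameter λ.
function corner_eval(t, t⁻, tᵢ, t⁺, u⁻, uᵢ, u⁺, λ)
    Δt₁, Δt₂ = tᵢ - t⁻, t⁺ - tᵢ                      # Δtᵢ, Δtᵢ₊₁
    Δu₁, Δu₂ = uᵢ - u⁻, u⁺ - uᵢ                      # Δuᵢ, Δuᵢ₊₁
    t_lo, t_hi = tᵢ - λ / 2 * Δt₁, tᵢ + λ / 2 * Δt₂  # spline interval bounds
    t < t_lo && return u⁻ + Δu₁ / Δt₁ * (t - t⁻)     # lower linear section
    t > t_hi && return uᵢ + Δu₂ / Δt₂ * (t - tᵢ)     # upper linear section
    # Spline section: invert T_i(s) = t for s, then evaluate u_i(s)
    ΔΔt, ΔΔu = Δt₂ - Δt₁, Δu₂ - Δu₁
    s = (-Δt₁ + sqrt(Δt₁^2 + 2 * ΔΔt * (t - t_lo) / λ)) / ΔΔt
    u_lo = uᵢ - λ / 2 * Δu₁                          # u at the left spline end
    return λ / 2 * ΔΔu * s^2 + λ * Δu₁ * s + u_lo
end

corner_eval(0.95, 0.0, 1.0, 3.0, 0.0, 1.0, 0.0, 0.25)  # just left of the corner at tᵢ = 1
```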
| SmoothInterpolation | https://github.com/SouthEndMusic/SmoothInterpolation.jl.git |
|
[
"MIT"
] | 0.1.0 | 2870c7c22941912749fa593881903a1cd18057fc | docs | 2978 | # Integrating
## Complete intervals
We are interested in integrating the smoothed interpolation from the start to some $t > t_1$. To compute this efficiently we need to know the integral of the interpolation over the various intervals. More precisely:
- For the linear sections we obtain
```math
\begin{equation}
\int_{t_{i-1+\frac{\lambda}{2}}}^{t_{i-\frac{\lambda}{2}}}
u_{i-1} + \frac{\Delta u_i}{\Delta t_i}(\tau - t_{i-1})
\text{d}\tau
=
(1 - \lambda)\Delta t_i
\left[
u_{i-1}
+
\frac{1}{2}(1 - \lambda) \Delta u_i
\right].
\end{equation}
```
- For the spline sections we obtain (see the numerical check after this list)
```math
\begin{equation}
\begin{aligned}
\int_{t_{i-\frac{\lambda}{2}}}^{t_{i + \frac{\lambda}{2}}} u_i(\tau)\text{d}\tau
&=
\int_0^1 T'_i(s)u_i(T_i(s))\text{d}s \\
&=
\int_0^1
\left[\lambda(\Delta t_{i+1} - \Delta t_i)s + \lambda\Delta t_i\right]
\left[\frac{\lambda}{2}(\Delta u_{i+1} - \Delta u_i)s^2 + \lambda \Delta u_i s + u_{i - \frac{\lambda}{2}}\right]
\text{d}s \\
&= \frac{\lambda^2}{24}
\left[
\Delta t_i \left(-3\Delta u_i + \Delta u_{i+1}\right) +
\Delta t_{i+1} \left(-\Delta u_i + 3 \Delta u_{i+1}\right)
\right]
+
\frac{\lambda}{2}(\Delta t_i + \Delta t_{i+1})u_i.
\end{aligned}
\end{equation}
```
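The closed-form value for the spline sections can be sanity-checked numerically, as in the following self-contained sketch with made-up values (plain Julia, not package code):

```julia
λ = 0.25
Δt₁, Δt₂ = 1.0, 2.0      # Δtᵢ, Δtᵢ₊₁ (made-up values)
Δu₁, Δu₂ = 0.5, -0.3     # Δuᵢ, Δuᵢ₊₁
uᵢ = 1.0                 # value at the corner point
u_lo = uᵢ - λ / 2 * Δu₁  # u_{i - λ/2}

T′(s) = λ * (Δt₂ - Δt₁) * s + λ * Δt₁                         # T′ᵢ(s)
u_spline(s) = λ / 2 * (Δu₂ - Δu₁) * s^2 + λ * Δu₁ * s + u_lo  # uᵢ(Tᵢ(s))

closed_form = λ^2 / 24 * (Δt₁ * (-3Δu₁ + Δu₂) + Δt₂ * (-Δu₁ + 3Δu₂)) +
              λ / 2 * (Δt₁ + Δt₂) * uᵢ

s = range(0, 1; length = 10_001)
f = T′.(s) .* u_spline.(s)
numeric = sum((f[1:(end - 1)] .+ f[2:end]) ./ 2) * step(s)  # trapezoidal rule

@assert isapprox(closed_form, numeric; rtol = 1e-6)
```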
## Incomplete intervals
We now define the new set of points $(\tilde{\mathbf{p}}_j)_{j=1}^{2n}$ given by all the $\mathbf{p}_{i - \frac{\lambda}{2}}, \mathbf{p}_{i+ \frac{\lambda}{2}}$ and the original boundary points, sorted by $t$. Then for $t \in \left[\tilde{t}_{J-1}, \tilde{t}_J\right]$ we have
```math
\begin{equation}
\begin{aligned}
U(t) = \int_{t_1}^t u(\tau)\text{d}\tau = \sum_{j = 2}^{J-1} \int_{\tilde{t}_{j-1}}^{\tilde{t}_j} u(\tau)\text{d}\tau + \int_{\tilde{t}_{J-1}}^t u(\tau)\text{d}\tau,
\end{aligned}
\end{equation}
```
where the summed integrals are given by the values above. For the last integral:
- If $J$ is odd then the last integral is of a linear section:
```math
\begin{equation}
\int_{\tilde{t}_{J-1}}^t u(\tau)\text{d}\tau = \left((t-t_I) - \frac{\lambda}{2}\Delta t_{I+1}\right)u_I
+
\frac{1}{2}\frac{\Delta u_{I+1}}{\Delta t_{I+1}}\left[(t-t_I)^2 - \frac{\lambda^2}{4}\Delta t_{I+1}^2\right]
\end{equation}
```
where $I = \frac{J-1}{2}$.
- If $J$ is even the last integral is of a spline section:
```math
\begin{equation}
\begin{aligned}
\int_{\tilde{t}_{J-1}}^t u_I(\tau)\text{d}\tau &=
\int_0^{S_I(t)} T'_I(s)u_I\left(T_I(s)\right)\text{d}s \\
&=
\int_0^{S_I(t)}
\left[\lambda(\Delta t_{I+1} - \Delta t_I)s + \lambda\Delta t_I\right]
\left[\frac{\lambda}{2}(\Delta u_{I+1} - \Delta u_I)s^2 + \lambda \Delta u_I s + u_{I - \frac{\lambda}{2}}\right]
\text{d}s
\end{aligned}
\end{equation}
```
where $I = \frac{J}{2}$.
===== docs: Inverting the integral =====
# Inverting the integral
We are interested in inverting $U(t)$ as defined above. Note that $U(t)$ is invertible if $u(t)$ is positive for all $t > t_1$. If we define
```math
\begin{equation}
U_J = \int_{t_1}^{\tilde{t}_J}u(\tau)\text{d}\tau = \sum_{j = 2}^{J} \int_{\tilde{t}_{j-1}}^{\tilde{t}_j} u(\tau)\text{d}\tau,
\end{equation}
```
then solving $U(t) = V$ for $t$ where $V \in [U_{J-1}, U_J]$ amounts to solving
```math
\begin{equation}
\int_{\tilde{t}_{J-1}}^t u(\tau)\text{d}\tau = V - U_{J-1}.
\end{equation}
```
For linear sections this yields a quadratic equation in $t$ with solution
```math
\begin{equation}
t = t_I + \left[-\frac{u_I}{\Delta u_{I+1}} + \text{sign}\left(\frac{u_{I+1}}{\Delta u_{I+1}}\right)\sqrt{\left(\frac{u_I}{\Delta u_{I+1}}\right)^2 +\lambda\left(\frac{u_I}{\Delta u_{I+1}} + \frac{\lambda}{4}\right) +2\frac{V - U_{J-1}}{\Delta t_{I+1}\Delta u_{I+1}}}\right]\Delta t_{I+1}.
\end{equation}
```
For spline sections this leads to a quartic equation in $s$:
```math
\begin{equation}
\begin{aligned}
3(\Delta t_{I+1} - \Delta t_I)(\Delta u_{I+1} - \Delta u_I)s^4 + \\
4\Delta t_I (\Delta u_{I+1} - \Delta u_I) s^3 + \\
12(\Delta t_{I+1} - \Delta t_I)\left(\Delta u_I + \frac{u_{I - \frac{\lambda}{2}}}{\lambda}\right)s^2 + \\
24 \Delta t_I \left(\Delta u_I + \frac{u_{I - \frac{\lambda}{2}}}{\lambda}\right) s + \\
- \frac{24}{\lambda^2}(V - U_{J-1}) = 0
\end{aligned}
\end{equation}
```
This quartic equation can be solved with the [quartic formula](https://en.wikipedia.org/wiki/Quartic_function#General_formula_for_roots).
[Quartic equation at Wolfram Alpha](https://www.wolframalpha.com/input?i=integrate+%28lambda*%28t_2+-+t_1%29*s+%2B+lambda*t_1%29*%28lambda%2F2+*+%28u_2+-+u_1%29*s%5E2+%2B+lambda*u_1+%2B+u_3%29+ds+from+0+to+S)
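As a concrete illustration, the package's internal root iterator (exercised exactly this way in the test suite) solves such a quartic:

```julia
using SmoothInterpolation: iterate_roots, p_coeff, q_coeff

# (s - 1)(s - 2)(s - 3)(s - 4) = s^4 - 10s^3 + 35s^2 - 50s + 24
c4, c3, c2, c1, c0 = 1.0, -10.0, 35.0, -50.0, 24.0
p = p_coeff(c4, c3, c2)
q = q_coeff(c4, c3, c2, c1)
roots = collect(iterate_roots(4, c4, c3, c2, c1, c0, p, q))
# roots ≈ [2, 4, 3, 1], returned as complex numbers with ≈ zero imaginary part
```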
===== docs: Motivation =====
# Motivation
I didn't like the options available in `DataInterpolations.jl` for my application, so I came up with my own concept that mainly keeps the linear interpolation intact, but rounds off the corners between the linear sections. The main recipe is this, per corner:
- Add 2 points on either side close to the corner, on their respective linear sections;
- Remove the corner point;
- Connect the 2 new points with a spline curve.
The advantage of the spline curve over a polynomial one is that the connection can be $C^1$ smooth (i.e. the smoothed curve and its derivative are continuous) without the risk of the large oscillations that degree 3 polynomials can introduce.
## Application
The original application for this package is [Ribasim](https://github.com/Deltares/Ribasim), where smooth interpolation can help with the convergence of solving the non-linear ODE problem.

===== TextEncodeBase v0.8.3 (MIT) | https://github.com/chengchingwen/TextEncodeBase.jl.git =====

===== docs/make.jl =====
using TextEncodeBase
using Documenter
DocMeta.setdocmeta!(TextEncodeBase, :DocTestSetup, :(using TextEncodeBase); recursive=true)
makedocs(;
modules=[TextEncodeBase],
authors="chengchingwen <[email protected]> and contributors",
repo="https://github.com/chengchingwen/TextEncodeBase.jl/blob/{commit}{path}#{line}",
sitename="TextEncodeBase.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://chengchingwen.github.io/TextEncodeBase.jl",
assets=String[],
),
pages=[
"Home" => [
"index.md",
"design.md",
"api.md",
],
],
)
deploydocs(;
repo="github.com/chengchingwen/TextEncodeBase.jl",
devbranch="main",
)
===== example: BERT tokenization with Transformers.jl =====
using Transformers
using Transformers.Pretrain
using Transformers.BidirectionalEncoder: WordPiece, bert_cased_tokenizer
using TextEncodeBase
using TextEncodeBase: NestedTokenizer, BaseTokenization, Sentence, Word, SubWord, getvalue, Splittable
struct BertCasedTokenization <: BaseTokenization
wordpiece::WordPiece
end
# split sentence with `bert_cased_tokenizer` (defined with WordTokenizers.jl's `TokenBuffer`)
TextEncodeBase.splitting(::BertCasedTokenization, s::Sentence) = bert_cased_tokenizer(getvalue(s))
# word is splittable with WordPiece
TextEncodeBase.splittability(::BertCasedTokenization, w::Word) = Splittable()
# split word with `WordPiece`
TextEncodeBase.splitting(t::BertCasedTokenization, w::Word) = t.wordpiece(getvalue(w))
tokenizer = pretrain"bert-cased_L-12_H-768_A-12:tokenizer" # this is just `bert_cased_tokenizer`
wordpiece = pretrain"bert-cased_L-12_H-768_A-12:wordpiece"
tkr = NestedTokenizer(BertCasedTokenization(wordpiece))
text1 = "Peter Piper picked a peck of pickled peppers"
single_without_TEB = text1 |> tokenizer |> wordpiece
single_with_TEB = tkr(Sentence(text1))
# `NestedTokenizer` return vector of vector
@assert single_without_TEB == map(getvalue, single_with_TEB[])
#=
julia> single_without_TEB
11-element Vector{String}:
"Peter"
"Piper"
"picked"
"a"
"p"
"##eck"
"of"
"pick"
"##led"
"pepper"
"##s"
julia> single_with_TEB
1-element Vector{Vector{TextEncodeBase.TokenStage}}:
[Token("Peter"), Token("Piper"), Token("picked"), Token("a"), Token("p"), Token("##eck"), Token("of"), Token("pick"), Token("##led"), Token("pepper"), Token("##s")]
julia> single_without_TEB == map(getvalue, single_with_TEB[])
true
=#
# define stage for batch of data
TextEncodeBase.@stage BatchSentence{A<:AbstractVector, M} TextEncodeBase.DocumentStage
# struct BatchSentence{A<:AbstractVector, M} <: TextEncodeBase.DocumentStage
# x::A
# meta::M
# end
# BatchSentence(x) = BatchSentence(x, nothing)
# TextEncodeBase.setmeta(x::BatchSentence, meta) = BatchSentence(x.x, meta)
# TextEncodeBase.setvalue(x::BatchSentence, y) = BatchSentence(y, x.meta)
# splittability and split behavior for `BatchSentence`
TextEncodeBase.splittability(::BertCasedTokenization, ::BatchSentence) = Splittable()
TextEncodeBase.splitting(::BertCasedTokenization, s::BatchSentence) = s.x
text2 = "Fuzzy Wuzzy was a bear"
texts = [text1, text2]
batch_without_TEB = map(wordpiece∘tokenizer, texts)
batch_with_TEB = tkr(BatchSentence(texts))
@assert batch_without_TEB == TextEncodeBase.nestedcall(getvalue, batch_with_TEB)
#=
julia> batch_without_TEB
2-element Vector{Vector{String}}:
["Peter", "Piper", "picked", "a", "p", "##eck", "of", "pick", "##led", "pepper", "##s"]
["Fu", "##zzy", "Wu", "##zzy", "was", "a", "bear"]
julia> batch_with_TEB
2-element Vector{Vector{TextEncodeBase.TokenStage}}:
[Token("Peter"), Token("Piper"), Token("picked"), Token("a"), Token("p"), Token("##eck"), Token("of"), Token("pick"), Token("##led"), Token("pepper"), Token("##s")]
[Token("Fu"), Token("##zzy"), Token("Wu"), Token("##zzy"), Token("was"), Token("a"), Token("bear")]
julia> batch_without_TEB == TextEncodeBase.nestedcall(getvalue, batch_with_TEB)
true
=#
using TextEncodeBase: IndexedTokenization
itkr = NestedTokenizer(IndexedTokenization(BertCasedTokenization(wordpiece)))
ibatch_with_TEB = itkr(BatchSentence(texts))
#=
# subword from same word having the same `word_id`
julia> ibatch_with_TEB[1]
11-element Vector{TextEncodeBase.TokenStage}:
Token("Peter", (sentence_id = 1, word_id = 1, token_id = 1))
Token("Piper", (sentence_id = 1, word_id = 2, token_id = 2))
Token("picked", (sentence_id = 1, word_id = 3, token_id = 3))
Token("a", (sentence_id = 1, word_id = 4, token_id = 4))
Token("p", (sentence_id = 1, word_id = 5, token_id = 5))
Token("##eck", (sentence_id = 1, word_id = 5, token_id = 6))
Token("of", (sentence_id = 1, word_id = 6, token_id = 7))
Token("pick", (sentence_id = 1, word_id = 7, token_id = 8))
Token("##led", (sentence_id = 1, word_id = 7, token_id = 9))
Token("pepper", (sentence_id = 1, word_id = 8, token_id = 10))
Token("##s", (sentence_id = 1, word_id = 8, token_id = 11))
julia> ibatch_with_TEB[2]
7-element Vector{TextEncodeBase.TokenStage}:
Token("Fu", (sentence_id = 2, word_id = 1, token_id = 1))
Token("##zzy", (sentence_id = 2, word_id = 1, token_id = 2))
Token("Wu", (sentence_id = 2, word_id = 2, token_id = 3))
Token("##zzy", (sentence_id = 2, word_id = 2, token_id = 4))
Token("was", (sentence_id = 2, word_id = 3, token_id = 5))
Token("a", (sentence_id = 2, word_id = 4, token_id = 6))
Token("bear", (sentence_id = 2, word_id = 5, token_id = 7))
=#
using TextEncodeBase: nestedcall, with_head_tail, trunc_and_pad, nested2batch
# construct `Vocab` with `WordPiece`
vocab = Vocab(wordpiece.vocab, wordpiece.vocab[wordpiece.unk_idx])
# define encoder with `TextEncoder`
enc = TextEncoder(
itkr, vocab,
nested2batch ∘ trunc_and_pad(nothing, vocab.unk) ∘ with_head_tail("[CLS]", "[SEP]") ∘ nestedcall(getvalue)
)
#=
julia> encode(enc, BatchSentence(texts))
28996x13x2 OneHotArray{28996, 3, Matrix{OneHot{0x00007144}}}:
[...]
julia> decode(enc, ans)
13×2 Matrix{String}:
"[CLS]" "[CLS]"
"Peter" "Fu"
"Piper" "##zzy"
"picked" "Wu"
"a" "##zzy"
"p" "was"
"##eck" "a"
"of" "bear"
"pick" "[SEP]"
"##led" "[UNK]"
"pepper" "[UNK]"
"##s" "[UNK]"
"[SEP]" "[UNK]"
=#
===== src/TextEncodeBase.jl =====
module TextEncodeBase
using PartialFunctions
import WordTokenizers
using PrimitiveOneHot
using PrimitiveOneHot: OneHot
# tokenize
export AbstractTokenizer, AbstractTokenization
include("./utils.jl")
include("./base.jl")
include("./indexed.jl")
include("./match.jl")
include("./split.jl")
include("./tkrs.jl")
include("./batch.jl")
include("./macro.jl")
include("./normalize.jl")
include("./replace.jl")
# vocab
export AbstractVocabulary, Vocab, lookup, OneHot, OneHotArray
include("./lookupvector.jl")
include("./vocab.jl")
# reexport pipeline
using FuncPipelines
export Pipeline, Pipelines, PipeGet
# encode
export AbstractTextEncoder, TextEncoder, encode, decode, encode_indices, decode_indices, onehot_encode, decode_text
include("./encode.jl")
# utils
export matchsplits
end
| TextEncodeBase | https://github.com/chengchingwen/TextEncodeBase.jl.git |
|
[
"MIT"
] | 0.8.3 | 66f827fa54c38cb7a7b174d3a580075b10793f5a | code | 13616 | using DataStructures: MutableLinkedList
# DataStructures.jl #883
# The current `append!` behavior for a generic iterable is `push!`-like, not element-wise `append!`,
# so we overwrite it here for the correct behavior.
function Base.append!(l::MutableLinkedList, elt)
for v in elt
push!(l, v)
end
return l
end
using WordTokenizers: rulebased_split_sentences, nltk_word_tokenize
"""
Abstract type for tokenizers.
Each tokenizer is linked with a tokenization (by
defining `tokenization(::Tokenizer) = Tokenization()`).
The overall framework dispatches on both the tokenizer and
the tokenization, but most of the time we only add methods
for the tokenization. This allows further composability and
lets a given tokenizer intervene in the tokenization process.
"""
abstract type AbstractTokenizer end
"""
Abstract type for tokenizations.
The tokenization procedure is separated into multiple
`TokenStages` and recursive calls of `splitting`, `wrap`,
and `tokenize`. `splitting` breaks a string into substrings,
`wrap` marks the substrings with new `TokenStages`, and
`tokenize` drives the overall tokenization.
"""
abstract type AbstractTokenization end
"""
Abstract type for types that wrap the input into a specific stage to control tokenization.
There are six builtin stages in TextEncodeBase (all abstract XStage <: TokenStages):
1. Document <: DocumentStage: the input string is a full document,
and thus needs to be split into multiple sentences.
2. Sentence <: SentenceStage: the input string is a full sentence,
and thus needs to be split into multiple parts (SubSentence/Word/Token).
3. SubSentence <: SubSentenceStage: special wrapper for the case where the tokenizer
does not directly break the sentence into words/tokens and the resulting pieces contain
multiple words/tokens, but you need the information that they are not full sentences.
4. Word <: WordStage: the input string is a single word.
5. SubWord <: SubWordStage: similar to SubSentence, but for a word.
6. Token <: TokenStage: the final piece of the tokenization process.
Generally, it marks the end of processing for this piece and should
never be split.
Each wrapper has two fields: `x` for the input and `meta` for extra information (`nothing` if not provided).
"""
abstract type TokenStages end
abstract type DocumentStage <: TokenStages end
abstract type SentenceStage <: TokenStages end
abstract type SubSentenceStage <: TokenStages end
abstract type WordStage <: TokenStages end
abstract type SubWordStage <: TokenStages end
abstract type TokenStage <: TokenStages end
struct Document{T, M} <: DocumentStage ; x::T; meta::M; end
struct Sentence{T, M} <: SentenceStage ; x::T; meta::M; end
struct SubSentence{T, M} <: SubSentenceStage ; x::T; meta::M; end
struct Word{T, M} <: WordStage ; x::T; meta::M; end
struct SubWord{T, M} <: SubWordStage ; x::T; meta::M; end
struct Token{T, M} <: TokenStage ; x::T; meta::M; end
Document(x) = Document(x, nothing)
Sentence(x) = Sentence(x, nothing)
SubSentence(x) = SubSentence(x, nothing)
Word(x) = Word(x, nothing)
SubWord(x) = SubWord(x, nothing)
Token(x) = Token(x, nothing)
getvalue(x::TokenStages) = x.x
getmeta(x::TokenStages) = x.meta
hasmeta(x::TokenStages) = !isnothing(getmeta(x))
setmeta(x::Document, meta) = Document(x.x, meta)
setmeta(x::Sentence, meta) = Sentence(x.x, meta)
setmeta(x::SubSentence, meta) = SubSentence(x.x, meta)
setmeta(x::Word, meta) = Word(x.x, meta)
setmeta(x::SubWord, meta) = SubWord(x.x, meta)
setmeta(x::Token, meta) = Token(x.x, meta)
setvalue(x::Document, y) = Document(y, x.meta)
setvalue(x::Sentence, y) = Sentence(y, x.meta)
setvalue(x::SubSentence, y) = SubSentence(y, x.meta)
setvalue(x::Word, y) = Word(y, x.meta)
setvalue(x::SubWord, y) = SubWord(y, x.meta)
setvalue(x::Token, y) = Token(y, x.meta)
updatemeta(::Nothing, meta) = meta
updatemeta(a::NamedTuple, meta::NamedTuple) = merge(a, meta)
updatevalue(f, x::TokenStages) = setvalue(x, f(getvalue(x)))
updatemeta(x::TokenStages, meta) = setmeta(x, updatemeta(getmeta(x), meta))
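# For example (a minimal sketch of the stage helpers defined above):
#=
julia> updatevalue(uppercase, Token("word"))
Token("WORD")

julia> updatemeta(Token("word"), (id = 1,))
Token("word", (id = 1,))
=#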
function Base.show(io::IO, t::TokenStages)
print(io, typeof(t).name.name)
vs = filter(!isnothing, ntuple(i->getfield(t, i), fieldcount(typeof(t))))
if length(vs) == 1
print(io, '(')
show(io, vs[1])
print(io, ')')
else
print(io, vs)
end
end
const ParentStages = Union{Nothing, TokenStages}
"""
splittability trait
The splittability trait decides whether the given combination (tokenizer x tokenization x stage) is
splittable or not (`Splittable` or `UnSplittable`). For example, `DefaultTokenization` and `SentenceStage`
are splittable (i.e. `splittability(::DefaultTokenization, ::SentenceStage) = Splittable()`). The splittability
changes the behavior of `tokenize`: if it's splittable, `tokenize` will try to call `splitting` on the input,
`wrap` each splitting result, and recurse. Otherwise, it will directly call `wrap` and then recurse into `tokenize`.
"""
abstract type Splittability end
struct Splittable <: Splittability end
struct UnSplittable <: Splittability end
"""
splittability(args...)
Return the splittability (`Splittable`/`UnSplittable`) of the given argument combination.
Overload to make a `TokenStages` splittable.
"""
function splittability end
"""
splittable(args...)
Return `true` if the splittability of the given argument combination is `Splittable()`.
"""
splittable(args...) = splittable(splittability(args...))
splittable(::Splittable) = true
splittable(::UnSplittable) = false
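# For example (a minimal sketch; `DefaultTokenization` is defined later in this file):
#=
julia> splittable(DefaultTokenization(), Sentence("a b"))
true

julia> splittable(DefaultTokenization(), Word("a"))
false
=#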
splitting(::typeof(splittability), args...) = splitting(splittability(args...), args...)
splitting(::Splittable, args...) = splitting(args...)
splitting(::UnSplittable, args...) = error("Argument is unsplittable: ", args)
# dispatch: tokenizer -> parent stage -> tokenization -> token stage
let ATR = AbstractTokenizer, AT = AbstractTokenization
# splittability: overload to make specific combination splittable
global @inline splittability(tkr::ATR, t::AT, x::TokenStages) = splittability(tkr, nothing, t, x)
global @inline splittability(tkr::ATR, s::ParentStages, t::AT, x::TokenStages) = splittability(s, t, x)
global @inline splittability(::ParentStages, t::AT, x::TokenStages) = splittability(t, x)
global @inline splittability(t::AT, x::TokenStages) = UnSplittable()
# after `Splittable`, define how to split it
global @inline splitting(::ATR, p::ParentStages, t::AT, x::TokenStages) = splitting(p, t, x)
global @inline splitting(p::ParentStages, t::AT, x::TokenStages) = splitting(t, x)
# a callback for `splitting`; `x` is the result of `splitting(::ATR, ::ParentStages, ::AT, ::TokenStages)`
global @inline splitting(::ATR, p::ParentStages, t::AT, s::TokenStages, x) = splitting(p, t, s, x)
global @inline splitting(p::ParentStages, t::AT, s::TokenStages, x) = splitting(t, s, x)
global @inline splitting(::AT, ::TokenStages, x) = x
# splittable (4-arg): wrap the splitting result into specific `TokenStages`, e.g. "word" => Word("word")
global @inline wrap(::ATR, p::ParentStages, t::AT, s::TokenStages, x) = wrap(p, t, s, x)
global @inline wrap(p::ParentStages, t::AT, s::TokenStages, x) = wrap(t, s, x)
global @inline wrap(::AT, ::TokenStages, x::TokenStages) = x # already wrapped
# unsplittable (3-arg): transform the input into next `TokenStages`, e.g. Word("word") => Token("word")
global @inline wrap(::ATR, p::ParentStages, t::AT, x::TokenStages) = wrap(p, t, x)
global @inline wrap(p::ParentStages, t::AT, s::TokenStages) = wrap(t, s)
# the outer-most api: split the input and recursively tokenize the results; an empty input is ignored
global @inline tokenize(tkr::ATR, t::AT, x::TokenStages) = tokenize(tkr, nothing, t, x)
global @inline tokenize(tkr::ATR, s::ParentStages, t::AT, x::TokenStages) = tokenize_procedure(tkr, s, t, x)
global @inline tokenize(tkr::ATR, s::ParentStages, t::AT, x::TokenStage) = isempty(getvalue(x)) ? TokenStage[] : TokenStage[wrap(tkr, s, t, x)]
end
"""
tokenize_procedure(tokenizer, tokenization, stage)
The procedure of tokenization (`splitting` + `wrap` + `tokenize`).
"""
@inline tokenize_procedure(tkr, t, x) = tokenize_procedure(tkr, nothing, t, x)
@inline tokenize_procedure(tkr, s, t, x) = collect(tokenize_procedure!(append!, splittability, MutableLinkedList{TokenStage}(), tkr, s, t, x))
@inline tokenize_procedure!(op, v, tkr, s, t, x) = tokenize_procedure!(op, splittability, v, tkr, s, t, x)
@inline tokenize_procedure!(op, ::typeof(splittability), v, tkr, s, t, x) = tokenize_procedure!(op, splittability(tkr, s, t, x), v, tkr, s, t, x)
function tokenize_procedure!(op, ::Splittable, v, tkr, s, t, x)
isempty(getvalue(x)) && return v
for sp in splitting(tkr, s, t, x, splitting(splittability, tkr, s, t, x))
v1 = tokenize(tkr, x, t, wrap(tkr, s, t, x, sp))
op(v, v1)
end
return v
end
function tokenize_procedure!(op, ::UnSplittable, v, tkr, s, t, x)
isempty(getvalue(x)) && return v
op(v, tokenize(tkr, x, t, wrap(tkr, s, t, x)))
return v
end
"""
splitting(t::AbstractTokenization, x::TokenStages)
Split `x` given its tokenization stage. For example,
the default behavior of a document stage is splitting into
sentences (with `WordTokenizers.split_sentences`).
Overload this method for custom tokenization.
"""
function splitting end
"""
wrap(t::AbstractTokenization, s::TokenStages, x)
Mark the tokenization stage of `x`, which is part of the splitting result of `s`.
For example, if we are doing simple whitespace tokenization at the sentence stage,
then `x` is just a single word of `s`, so we return `Word(x)` (or `Token(x)`).
Skipped if `x` is already a `TokenStages`. (This method only applies to splittable stages.)
Overload this method to control the tokenization process.
"""
function wrap end
@eval $((@macroexpand @doc """
wrap(t::AbstractTokenization, x::TokenStages)
A handler for unsplittable stages (token/word/...).
Overload this method for custom transforms.
"""
function wrap(t::AbstractTokenization, x::TokenStages) end
).args[2])
# abstract type for convenience
abstract type BaseTokenization <: AbstractTokenization end
struct DefaultTokenization <: BaseTokenization end
splittability(::BaseTokenization, x::Union{DocumentStage, SentenceStage, SubSentenceStage}) = Splittable()
splittability(::BaseTokenization, x::Union{WordStage, SubWordStage}) = UnSplittable()
splitting(::BaseTokenization, d::DocumentStage) = rulebased_split_sentences(getvalue(d))
splitting(::BaseTokenization, s::SentenceStage) = nltk_word_tokenize(getvalue(s))
splitting(::BaseTokenization, s::SubSentenceStage) = nltk_word_tokenize(getvalue(s))
wrap(::BaseTokenization, d::DocumentStage, x) = Sentence(x, getmeta(d))
wrap(::BaseTokenization, s::SentenceStage, x) = Word(x, getmeta(s))
wrap(::BaseTokenization, s::SubSentenceStage, x) = Word(x, getmeta(s))
wrap(::BaseTokenization, w::WordStage, x) = SubWord(x, getmeta(w))
wrap(::BaseTokenization, w::WordStage) = Token(getvalue(w), getmeta(w))
wrap(::BaseTokenization, w::SubWordStage) = Token(getvalue(w), getmeta(w))
wrap(::BaseTokenization, t::TokenStage) = t
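# For example (a minimal sketch of the default stage transitions: a sentence is split
# into words, and each word is wrapped into a token):
#=
julia> map(String, splitting(DefaultTokenization(), Sentence("Peter piped")))
2-element Vector{String}:
 "Peter"
 "piped"

julia> wrap(DefaultTokenization(), Word("Peter"))
Token("Peter")
=#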
abstract type WrappedTokenization{T<:AbstractTokenization} <: AbstractTokenization end
base(t::WrappedTokenization) = t.base
splittability(p::ParentStages, t::WrappedTokenization, x::TokenStages) = splittability(p, base(t), x)
splitting(p::ParentStages, t::WrappedTokenization, x::TokenStages) = splitting(p, base(t), x)
splitting(p::ParentStages, t::WrappedTokenization, s::TokenStages, x) = splitting(p, base(t), s, x)
wrap(p::ParentStages, t::WrappedTokenization, s::TokenStages, x) = wrap(p, base(t), s, x)
wrap(p::ParentStages, t::WrappedTokenization, s::TokenStages) = wrap(p, base(t), s)
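# For example, a hypothetical pass-through wrapper only needs a `base` field; the
# methods above delegate everything to it, so only the behavior that differs from
# `base` has to be overloaded:
#
# struct MyWrapper{T<:AbstractTokenization} <: WrappedTokenization{T}
# base::T
# end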
# tokenizer api
"""
tokenization(::AbstractTokenizer) :: AbstractTokenization
Return the tokenization object of the given tokenizer.
"""
tokenization(::AbstractTokenizer) = DefaultTokenization()
"""
preprocess(tkr::AbstractTokenizer, x)
Preprocess the input `x`. This is only called during `tkr(x)`.
"""
preprocess(t::AbstractTokenizer, x::TokenStages) = updatevalue(Base.Fix1(preprocess, t), x)
preprocess(t::AbstractTokenizer, x) = x
(t::AbstractTokenizer)(x::TS) where {TS <: TokenStages} = tokenize(t, nothing, tokenization(t), preprocess(t, x))
function Base.show(io::IO, t::AbstractTokenizer)
T = typeof(t)
n = fieldcount(T)
print(io, nameof(T))
if n != 0
base = hasfield(T, :tokenization)
basei = base ? findfirst(==(:tokenization), fieldnames(T)) : 0
print(io, '(')
base && show(io, t.tokenization)
for i = 1:n
i == basei && continue
(!base && i == 1) || print(io, ", ")
print(io, fieldname(T, i))
print(io, " = ")
show(IOContext(io, :limit=>true), getfield(t, i))
end
print(io, ')')
end
end
function Base.show(io::IO, t::AbstractTokenization)
T = typeof(t)
n = fieldcount(T)
print(io, nameof(T))
if n != 0
base = hasfield(T, :base)
basei = base ? findfirst(==(:base), fieldnames(T)) : 0
print(io, '(')
base && show(io, t.base)
for i = 1:n
i == basei && continue
(!base && i == 1) || print(io, ", ")
print(io, fieldname(T, i))
print(io, " = ")
show(IOContext(io, :limit=>true), getfield(t, i))
end
print(io, ')')
end
end
Base.show(io::IO, ::DefaultTokenization) = print(io, :default)
| TextEncodeBase | https://github.com/chengchingwen/TextEncodeBase.jl.git |
|
[
"MIT"
] | 0.8.3 | 66f827fa54c38cb7a7b174d3a580075b10793f5a | code | 1544 | using DataStructures: MutableLinkedList
struct Batch{S, A<:AbstractVector, M} <: TokenStages
x::A
meta::M
end
Batch{S}(x, meta = nothing) where S = Batch{S, typeof(x), typeof(meta)}(x, meta)
setmeta(x::Batch{S}, meta) where S = Batch{S}(x.x, meta)
setvalue(x::Batch{S}, y) where S = Batch{S}(y, x.meta)
function Base.show(io::IO, x::Batch{S}) where S
print(io, "Batch{", S, "}(", x.x)
isnothing(x.meta) || print(io, ", ", x.meta)
print(io, ')')
end
splittability(::BaseTokenization, ::Batch) = Splittable()
splitting(::BaseTokenization, s::Batch) = s.x
wrap(::BaseTokenization, b::Batch{S}, x) where S = S(x, getmeta(b))
# nested
tokenize(tkr::NestedTokenizer, p::ParentStages, t::AbstractTokenization, x::Batch{Document}) = collect(tokenize_procedure!(push!, MutableLinkedList{Vector{Vector{TokenStage}}}(), tkr, p, t, x))
tokenize(tkr::NestedTokenizer, p::ParentStages, t::AbstractTokenization, x::Batch{Sentence}) = collect(tokenize_procedure!(push!, MutableLinkedList{Vector{TokenStage}}(), tkr, p, t, x))
# indexed
splitting(p::ParentStages, t::IndexedTokenization, b::Batch{Sentence}, x) = enumerate(splitting(p, t.base, b, x))
wrap(p::ParentStages, t::IndexedTokenization, b::Batch{Sentence}, (i, x)) = updatemeta(wrap(p, t.base, b, x), (sentence_id = i,))
splitting(p::ParentStages, t::IndexedTokenization, b::Batch{Document}, x) = enumerate(splitting(p, t.base, b, x))
wrap(p::ParentStages, t::IndexedTokenization, b::Batch{Document}, (i, x)) = updatemeta(wrap(p, t.base, b, x), (document_id = i,))
| TextEncodeBase | https://github.com/chengchingwen/TextEncodeBase.jl.git |
|
[
"MIT"
] | 0.8.3 | 66f827fa54c38cb7a7b174d3a580075b10793f5a | code | 3452 | _larger_type(a, b) = sizeof(a) >= sizeof(b) ? a : b
function codesize(crs)
T = UInt8
for cr in crs
x = UInt32(last(cr))
xT = if x > UInt(typemax(UInt16))
UInt32
elseif x > UInt(typemax(UInt8))
UInt16
else
UInt8
end
T = _larger_type(T, xT)
end
return T
end
const CodeRangeT = Union{Integer, Char, UnitRange, StepRange}
const CodeRangeMap = Union{Tuple{CodeRangeT, CodeRangeT}, Pair{<:CodeRangeT, <:CodeRangeT}}
function _code_range(f, t)
fr = _code_range(f)
tr = _code_range(t)
@assert length(fr) == length(tr) "codemap of two ranges with different lengths: $(length(fr)) != $(length(tr))"
return (fr, tr)
end
_code_range(r::UnitRange) = Char(r.start):Char(r.stop)
_code_range(r::StepRange) = StepRange(Char(r.start), Int(r.step), Char(r.stop))
_code_range(r::StepRange{Char}) = StepRange(r.start, Int(r.step), r.stop)
_code_range(r::StepRange{Char, Int}) = r
_code_range(c::Integer) = _code_range(Char(c))
_code_range(c::Char) = c:c
code_range(arg::CodeRangeMap) = _code_range(arg[1], arg[2])
struct CodeMap{F, T}
from::Vector{StepRange{Char, Int}}
to::Vector{StepRange{Char, Int}}
function CodeMap(
from::Vector{StepRange{Char, Int}},
to::Vector{StepRange{Char, Int}},
)
From = codesize(from)::Type{<:Union{UInt8, UInt16, UInt32}}
To = codesize(to)::Type{<:Union{UInt8, UInt16, UInt32}}
@assert length(from) == length(to) "different number of code ranges: $(length(from)) != $(length(to))"
return new{From, To}(from, to)
end
end
(cm::CodeMap)(x) = codemap(cm, x)
CodeMap(args::CodeRangeMap...) = CodeMap(args)
function CodeMap(args::Union{Tuple, AbstractVector})
len = length(args)
from = Vector{StepRange{Char, Int}}(undef, len)
to = Vector{StepRange{Char, Int}}(undef, len)
for (i, arg) in enumerate(args)
from[i], to[i] = code_range(arg)
end
return CodeMap(from, to)
end
struct CodeUnMap{F, T}
codemap::CodeMap{F, T}
end
(um::CodeUnMap)(x) = codeunmap(um.codemap, x)
function find_code(rs, c)
@inbounds for (i, r) in enumerate(rs)
j = findfirst(==(c), r)
isnothing(j) && continue
return (i, j)
end
return nothing
end
function codemap(cm::CodeMap{F, T}, c::Char) where {F, T}
I = find_code(cm.from, c)
x = isnothing(I) ? c : cm.to[I[1]][I[2]]
return T(x)
end
codemap(cm::CodeMap, x::Integer) = codemap(cm, Char(x))
codemap(cm::CodeMap{F}, x::AbstractString) where F =
transcode(String, map(Base.Fix1(codemap, cm), transcode(F, codeunits(x))))
function codeunmap(cm::CodeMap{F, T}, c::Char) where {F, T}
I = find_code(cm.to, c)
x = isnothing(I) ? c : cm.from[I[1]][I[2]]
return F(x)
end
codeunmap(cm::CodeMap, x::Integer) = codeunmap(cm, Char(x))
codeunmap(cm::CodeMap{F, T}, x::AbstractString) where {F, T} =
transcode(String, map(Base.Fix1(codeunmap, cm), transcode(T, codeunits(x))))
Base.:(==)(a::CodeMap, b::CodeMap) = a.from == b.from && a.to == b.to
Base.:(==)(a::CodeUnMap, b::CodeUnMap) = a.codemap == b.codemap
function Base.show(io::IO, cm::CodeMap{F,T}) where {F, T}
print(io, "CodeMap{", F, " => ", T, '}')
print(io, '(', length(cm.to), " code-ranges)")
end
function Base.show(io::IO, um::CodeUnMap{F, T}) where {F, T}
print(io, "CodeUnMap{", F, " <= ", T, '}')
print(io, '(', length(um.codemap.to), " code-ranges)")
end
| TextEncodeBase | https://github.com/chengchingwen/TextEncodeBase.jl.git |
|
[
"MIT"
] | 0.8.3 | 66f827fa54c38cb7a7b174d3a580075b10793f5a | code | 2880 | using StructWalk
abstract type AbstractTextEncoder end
"""
process(::AbstractTextEncoder)
Get processing function of given encoder.
"""
process(e::AbstractTextEncoder) = e.process
process(::Type{AbstractTextEncoder}) = nestedcall(getvalue)
"""
tokenize(e::AbstractTextEncoder, x)
Use encoder's tokenizer to tokenize `x`.
"""
tokenize(e::AbstractTextEncoder, x) = e.tokenizer(x)
"""
process(e::AbstractTextEncoder, x)
Use encoder's processing function to process `x`.
"""
process(e::AbstractTextEncoder, x) = process(e)(x)
"""
onehot_encode(e::AbstractTextEncoder, x)
Look up `x` in the encoder's vocabulary. Return one-hot encoded vectors.
"""
onehot_encode(e::AbstractTextEncoder, x) = lookup(OneHot, e.vocab, x)
"""
lookup(e::AbstractTextEncoder, x)
Look up `x`. This is basically [`onehot_encode`](@ref) but can be overloaded for extra processing.
"""
lookup(e::AbstractTextEncoder, x) = onehot_encode(e, x)
"""
encode_indices(e::AbstractTextEncoder, x)
Encode into indices. Encode `x` without applying the `lookup` bound to `e`.
"""
encode_indices(e::AbstractTextEncoder, x) = process(e, tokenize(e, x))
"""
encode(e::AbstractTextEncoder, x)
Encode `x`.
"""
encode(e::AbstractTextEncoder, x) = lookup(e, encode_indices(e, x))
"""
decode_indices(e::AbstractTextEncoder, x)
Decode from indices. Decode `x` by reverse lookup in `e.vocab`.
"""
decode_indices(e::AbstractTextEncoder, x) = lookup(eltype(e.vocab), e.vocab, x)
"""
decode(e::AbstractTextEncoder, x)
Decode `x`. This is basically [`decode_indices`](@ref) but can be overloaded for post-processing.
"""
decode(e::AbstractTextEncoder, x) = decode_indices(e, x)
"""
decode_text(e::AbstractTextEncoder, x)
Decode `x` into texts. This is basically [`join_text`](@ref) with [`decode`](@ref) but can be overloaded
for post-processing.
"""
decode_text(e::AbstractTextEncoder, x) = join_text(decode(e, x))
"""
TextEncoder(tokenizer, vocab, process = nestedcall(getvalue))
A simple encoder implementation.
"""
struct TextEncoder{T<:AbstractTokenizer, V<:AbstractVocabulary, P} <: AbstractTextEncoder
tokenizer::T
vocab::V
process::P
end
TextEncoder(tkr::AbstractTokenizer, vocab::AbstractVocabulary) = TextEncoder(tkr, vocab, process(AbstractTextEncoder))
TextEncoder(builder, tkr::AbstractTokenizer, vocab::AbstractVocabulary) = TextEncoder(builder, TextEncoder(tkr, vocab))
"""
TextEncoder(builder, e::TextEncoder)
Given an encoder, return a new encoder that has the same tokenizer and vocabulary. `builder` is
a function that takes an encoder and returns a new processing function.
"""
TextEncoder(builder, e::TextEncoder) = TextEncoder(e.tokenizer, e.vocab, builder(e))
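# For example (a minimal sketch; assumes a tokenizer `tkr` and a vocabulary `vocab`
# are already constructed):
#=
julia> enc = TextEncoder(tkr, vocab); # process defaults to nestedcall(getvalue)

julia> enc2 = TextEncoder(enc) do e
nested2batch ∘ process(e)
end; # same tokenizer and vocab, new processing function
=#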
StructWalk.children(::TokenizerStyle, x::AbstractTextEncoder) = StructWalk.children(WalkStyle, x)
StructWalk.iscontainer(::TokenizerStyle, x::AbstractTextEncoder) = false
| TextEncodeBase | https://github.com/chengchingwen/TextEncodeBase.jl.git |
|
[
"MIT"
] | 0.8.3 | 66f827fa54c38cb7a7b174d3a580075b10793f5a | code | 1912 | using Base.Iterators: repeated
mutable struct Offsets
word::Int
token::Int
end
struct IndexedTokenization{T<:AbstractTokenization} <: WrappedTokenization{T}
base::T
end
IndexedTokenization() = IndexedTokenization(DefaultTokenization())
_offsets(s, w=0, t=0) = (meta = getmeta(s); hasmeta(s) && haskey(getmeta(s), :offsets) ? meta.offsets : Offsets(w,t))
@inline splitting(p::ParentStages, t::IndexedTokenization, s::TokenStages, x) = zip(repeated(_offsets(s)), Iterators.Flatten((true, Iterators.repeated(false))), splitting(p, t.base, s, x))
@inline splitting(p::ParentStages, t::IndexedTokenization, d::DocumentStage, x) = enumerate(splitting(p, t.base, d, x))
@inline wrap(p::ParentStages, t::IndexedTokenization, s::TokenStages, (i, f, x)) = updatemeta(wrap(p, t.base, s, x), (offsets = i, isfirst = f))
@inline wrap(p::ParentStages, t::IndexedTokenization, d::DocumentStage, (i, x)) = updatemeta(wrap(p, t.base, d, x), (sentence_id = i,))
@inline wrap(p::ParentStages, t::IndexedTokenization, s::TokenStages) = wrap(p, t.base, s)
function wrap(p::ParentStages, t::IndexedTokenization, w::SubWordStage)
meta = getmeta(w)
if hasmeta(w) && haskey(meta, :offsets)
offsets = meta.offsets
word_id = meta.isfirst ? (offsets.word += 1) : offsets.word
else
word_id = 1
end
return updatemeta(wrap(p, t.base, w), (word_id = word_id,))
end
function wrap(p::ParentStages, t::IndexedTokenization, x::TokenStage)
x = wrap(p, t.base, x)
meta = getmeta(x)
if hasmeta(x) && haskey(meta, :offsets)
offsets = meta.offsets
word_id = haskey(meta, :word_id) ? meta.word_id : (offsets.word += 1)
token_id = offsets.token += 1
meta = Base.structdiff(meta, NamedTuple{(:offsets, :isfirst)})
else
word_id = token_id = 1
end
return setmeta(x, updatemeta(meta, (word_id = word_id, token_id = token_id)))
end
| TextEncodeBase | https://github.com/chengchingwen/TextEncodeBase.jl.git |
|
[
"MIT"
] | 0.8.3 | 66f827fa54c38cb7a7b174d3a580075b10793f5a | code | 7257 | import DoubleArrayTries
using DoubleArrayTries: DoubleArrayTrie
lookup(dat::DoubleArrayTrie, i::Integer) = DoubleArrayTries.decode(dat, i)
lookup(dat::DoubleArrayTrie, k::Union{AbstractString, AbstractVector{UInt8}}) = DoubleArrayTries.lookup(dat, k)
abstract type LookupDict{T} <: AbstractDict{T, Int} end
LookupDict(list::AbstractVector) = LookupDict(keytype(list), list)
LookupDict(::Type{<:AbstractString}, list) = DATLookupDict(list)
LookupDict(::Type, list) = DictBackedLookupDict(list)
struct DATLookupDict{V <: Union{AbstractVector{UInt64}, AbstractDict{Int, UInt64}}} <: LookupDict{String}
trie::DoubleArrayTrie
uid2idx::DoubleArrayTries.CVector
idx2uid::V
end
function DATLookupDict(list::AbstractVector{<:AbstractString})
@assert allunique(list) "All elements should be unique"
sortedlist = sort(list)
trie = DoubleArrayTrie(sortedlist)
uid2idx = Vector{Int}(undef, length(list))
idx2uid = Vector{Int}(undef, length(list))
@inbounds for (i, str) in enumerate(list)
uid = lookup(trie, str)
uid2idx[uid] = i
idx2uid[i] = uid
end
return DATLookupDict(trie, DoubleArrayTries.CVector(uid2idx), DoubleArrayTries.CVector(idx2uid))
end
uid2idx(d::DATLookupDict, uid) = @inbounds Int(d.uid2idx[uid])
idx2uid(d::DATLookupDict, idx) = @inbounds Int(d.idx2uid[idx])
Base.length(d::DATLookupDict) = length(d.trie)
function Base.get(d::DATLookupDict, k::Union{AbstractString, AbstractVector{UInt8}}, v)
uid = lookup(d.trie, k)
uid == 0 && return v
return uid2idx(d, uid)
end
function Base.iterate(d::DATLookupDict, state = nothing)
it = iterate(d.trie, state)
isnothing(it) && return nothing
(key, uid), nstate = it
val = uid2idx(d, uid)
return key => val, nstate
end
lookup_index(d::DATLookupDict, unki, word) = get(d, word, unki)
function lookup_word(d::DATLookupDict, unk, index)
if d.idx2uid isa AbstractVector
checkbounds(Bool, d.idx2uid, index) || return unk
uid = @inbounds d.idx2uid[index]
else
isempty(d.idx2uid) && return unk
uid = get(d.idx2uid, index, zero(UInt64))
end
return iszero(uid) ? unk : lookup(d.trie, uid)
end
struct DictBackedLookupDict{T, D <: AbstractDict{T, Int},
V <: Union{AbstractVector{T}, AbstractDict{Int, T}}} <: LookupDict{T}
dict::D
list::V
end
function DictBackedLookupDict(list::AbstractVector)
@assert allunique(list) "All elements should be unique"
dict = Dict{eltype(list), Int}()
@inbounds for (i, val) in enumerate(list)
dict[val] = i
end
return DictBackedLookupDict(dict, list)
end
Base.length(d::DictBackedLookupDict) = length(d.dict)
Base.get(d::DictBackedLookupDict, k, v) = get(d.dict, k, v)
Base.iterate(d::DictBackedLookupDict, state...) = iterate(d.dict, state...)
lookup_index(d::DictBackedLookupDict, unki, word) = isempty(d.dict) ? unki : get(d, word, unki)
function lookup_word(d::DictBackedLookupDict, unk, index)
if d.list isa AbstractVector
checkbounds(Bool, d.list, index) || return unk
return @inbounds(d.list[index])
else
return isempty(d.list) ? unk : get(d.list, index, unk)
end
end
abstract type LookupVector{T} <: AbstractVector{T} end
LookupVector(list::AbstractVector) = LookupVector(eltype(list), list)
LookupVector(::Type{<:AbstractString}, list) = DATLookupVector(list)
LookupVector(::Type, list) = DictBackedLookupVector(list)
struct DATLookupVector{D <: DATLookupDict} <: LookupVector{String}
dict::D
end
basedict(v::DATLookupVector) = v.dict
DATLookupVector(vector::AbstractVector) = DATLookupVector(DATLookupDict(vector))
struct DictBackedLookupVector{T, D <: LookupDict{T}} <: LookupVector{T}
dict::D
end
basedict(v::DictBackedLookupVector) = v.dict
DictBackedLookupVector(vector::AbstractVector) = DictBackedLookupVector(DictBackedLookupDict(vector))
Base.length(v::LookupVector) = length(basedict(v))
Base.size(v::LookupVector) = (length(v),)
Base.checkbounds(::Type{Bool}, v::LookupVector, i) = !isnothing(lookup_word(v, nothing, i))
function Base.getindex(v::LookupVector, i::Integer)
k = lookup_word(v, nothing, i)
@boundscheck isnothing(k) && throw(BoundsError(v, i))
return k
end
lookup_index(v::LookupVector, unki, word) = lookup_index(basedict(v), unki, word)
lookup_word(v::LookupVector, unk, index) = lookup_word(basedict(v), unk, index)
struct OverwritableLookupVector{T, V <: LookupVector{T}, D <: DictBackedLookupDict{T}} <: LookupVector{T}
vector::V
dict::D
end
OverwritableLookupVector(vector::AbstractVector) = OverwritableLookupVector(LookupVector(vector))
function OverwritableLookupVector(vector::LookupVector)
T = eltype(vector)
dict = DictBackedLookupDict(Dict{T, Int}(), Dict{Int, T}())
return OverwritableLookupVector(vector, dict)
end
Base.length(v::OverwritableLookupVector) = length(v.vector)
function lookup_index(v::OverwritableLookupVector, unki, word)
i = lookup_index(v.dict, 0, word)
iszero(i) || return i
i = lookup_index(v.vector, 0, word)
iszero(i) && return unki
return isnothing(lookup_word(v.dict, nothing, i)) ? i : unki
end
function lookup_word(v::OverwritableLookupVector, unk, index)
k = lookup_word(v.dict, nothing, index)
return isnothing(k) ? lookup_word(v.vector, unk, index) : k
end
function Base.setindex!(v::OverwritableLookupVector, val, i::Integer)
@boundscheck checkbounds(v, i)
@assert iszero(lookup_index(v, 0, val)) "Elements must be unique; value $(repr(val)) is already in the lookup vector"
k = lookup_word(v.dict, nothing, i)
isnothing(k) || delete!(v.dict.dict, k)
v.dict.dict[val] = i
v.dict.list[i] = val
return v
end
function Base.setindex!(v::OverwritableLookupVector, val, k)
i = lookup_index(v, 0, k)
iszero(i) && throw(KeyError(k))
return v[i] = val
end
struct PerforatedOverwritableLookupVector{T, V <: LookupVector{T}, D <: DictBackedLookupDict{T}} <: LookupVector{T}
vector::V
dict::D
end
Base.length(v::PerforatedOverwritableLookupVector) = max(length(v.vector), maximum(keys(v.dict.list)))
function Base.getindex(v::PerforatedOverwritableLookupVector, i::Integer)
k = lookup_word(v, nothing, i)
isnothing(k) && throw(UndefRefError())
return k
end
function lookup_index(v::PerforatedOverwritableLookupVector, unki, word)
i = lookup_index(v.dict, 0, word)
iszero(i) || return i
i = lookup_index(v.vector, 0, word)
iszero(i) && return unki
return isnothing(lookup_word(v.dict, nothing, i)) ? i : unki
end
function lookup_word(v::PerforatedOverwritableLookupVector, unk, index)
k = lookup_word(v.dict, nothing, index)
return isnothing(k) ? lookup_word(v.vector, unk, index) : k
end
function Base.setindex!(v::PerforatedOverwritableLookupVector, val, i::Integer)
@assert iszero(lookup_index(v, 0, val)) "Elements must be unique; value $(repr(val)) is already in the lookup vector"
k = lookup_word(v.dict, nothing, i)
isnothing(k) || delete!(v.dict.dict, k)
v.dict.dict[val] = i
v.dict.list[i] = val
return v
end
function Base.setindex!(v::PerforatedOverwritableLookupVector, val, k)
i = lookup_index(v, 0, k)
iszero(i) && throw(KeyError(k))
return v[i] = val
end
| TextEncodeBase | https://github.com/chengchingwen/TextEncodeBase.jl.git |
|
[
"MIT"
] | 0.8.3 | 66f827fa54c38cb7a7b174d3a580075b10793f5a | code | 1887 | using Base.Meta: isexpr
function stagem(sig, abst)
abst = esc(abst)
if sig isa Symbol
name = sig
structdef = :(
struct $name{T, M} <: $abst
x::T
meta::M
end
)
elseif sig isa Expr
if isexpr(sig, :curly)
length(sig.args) != 3 && error("invalid TokenStages definition")
name, Tsig, Msig = sig.args
T = isexpr(Tsig, :<:) ? Tsig.args[1] : Tsig
M = isexpr(Msig, :<:) ? Msig.args[1] : Msig
structdef = :(
struct $sig <: $abst
x::$T
meta::$M
end
)
else
error("invalid TokenStages definition")
end
else
error("invalid TokenStages definition")
end
name = esc(name)
setmeta = esc(:(TextEncodeBase.setmeta))
setvalue = esc(:(TextEncodeBase.setvalue))
return quote
$structdef
$name(x) = $name(x, nothing)
$setmeta(x::$name, meta) = $name(x.x, meta)
$setvalue(x::$name, y) = $name(y, x.meta)
end
end
"""
@stage StageName
@stage StageName{A<:SomeType, B}
@stage StageName AbstractStage
@stage StageName{A<:SomeType, B} <: AbstractStage
Define a `TokenStages` subtype with two fields (`x` and `meta`), its single-argument constructor,
and methods for `setmeta` and `setvalue`.
Equivalent to:
```julia
struct StageName{A<:SomeType, B} <: AbstractStage
x::A
meta::B
end
StageName(x) = StageName(x, nothing)
TextEncodeBase.setmeta(x::StageName, meta) = StageName(x.x, meta)
TextEncodeBase.setvalue(x::StageName, y) = StageName(y, x.meta)
```
"""
macro stage(sig, abst=:TokenStages)
if isexpr(sig, :<:)
abst != :TokenStages && error("invalid TokenStages definition")
sig, abst = sig.args
end
return stagem(sig, abst)
end
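# For example (`MyAnnotation` is a hypothetical stage name; assumes `@stage` and
# `TokenStages` are in scope):
#=
julia> @stage MyAnnotation;

julia> MyAnnotation("text")
MyAnnotation("text")
=#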
| TextEncodeBase | https://github.com/chengchingwen/TextEncodeBase.jl.git |
|
[
"MIT"
] | 0.8.3 | 66f827fa54c38cb7a7b174d3a580075b10793f5a | code | 1972 | struct MatchTokenization{T<:AbstractTokenization, P <: AbstractPattern} <: WrappedTokenization{T}
base::T
patterns::Vector{P}
MatchTokenization(base::T, patterns::Vector{P}) where {T <: AbstractTokenization, P <: AbstractPattern} = MatchTokenization{T}(base, patterns)
function MatchTokenization{T}(base::T, patterns::Vector{P}) where {T <: AbstractTokenization, P <: AbstractPattern}
return new{T, P}(base, patterns)
end
end
MatchTokenization(patterns) = MatchTokenization(DefaultTokenization(), patterns)
MatchTokenization(base, patterns) = MatchTokenization(base, map(as_match, patterns))
Base.:(==)(a::MatchTokenization, b::MatchTokenization) = a.base == b.base && a.patterns == b.patterns
@inline splitting(p::ParentStages, t::MatchTokenization, x::SubSentence) = splitting(p, t.base, Sentence(getvalue(x), getmeta(x)))
@inline splitting(p::ParentStages, t::MatchTokenization, s::SentenceStage) = matchsplits(t.patterns, getvalue(s))
@inline function wrap(p::ParentStages, t::MatchTokenization, s::SentenceStage, (istoken, x))
meta = updatemeta(getmeta(s), (ismatch = istoken,))
return istoken ? Token(x, meta) : SubSentence(x, meta)
end
@inline wrap(p::ParentStages, t::MatchTokenization, s::TokenStages, x) = wrap(p, t.base, s, x)
@inline wrap(p::ParentStages, t::MatchTokenization, x::TokenStages) = wrap(p, t.base, x)
# calling directly on a word should check whether any match exists
splittability(::Nothing, ::MatchTokenization, ::WordStage) = Splittable()
@inline splitting(::Nothing, t::MatchTokenization, w::WordStage) = matchsplits(t.patterns, getvalue(w))
@inline function wrap(::Nothing, t::MatchTokenization, w::WordStage, (istoken, x))
meta = updatemeta(getmeta(w), (ismatch = istoken,))
return istoken ? Token(x, meta) : Word(x, meta)
end
# show
function Base.show(io::IO, t::MatchTokenization)
print(io, "MatchTokenization(")
show(io, t.base)
print(io, ", ", length(t.patterns), " patterns)")
end
| TextEncodeBase | https://github.com/chengchingwen/TextEncodeBase.jl.git |
|
[
"MIT"
] | 0.8.3 | 66f827fa54c38cb7a7b174d3a580075b10793f5a | code | 4265 | abstract type TextNormalizer{T<:AbstractTokenization} <: WrappedTokenization{T} end
abstract type SentenceNormalizer{T<:AbstractTokenization} <: TextNormalizer{T} end
abstract type WordNormalizer{T<:AbstractTokenization} <: TextNormalizer{T} end
function normalizer end
# perform normalization at the sentence level.
splitting(p::ParentStages, t::SentenceNormalizer, x::Union{SentenceStage, SubSentenceStage}) = splitting(p, base(t), updatevalue(normalizer(t), x))
# unsplittable stages passed in directly should also be normalized, except tokens.
wrap(::Nothing, t::SentenceNormalizer, s::TokenStages) = wrap(nothing, base(t), updatevalue(normalizer(t), s))
wrap(::Nothing, t::SentenceNormalizer, s::TokenStage) = wrap(nothing, base(t), s)
# perform normalization at the word level.
wrap(p::TokenStages, t::WordNormalizer, s::WordStage) = wrap(p, base(t), updatevalue(normalizer(t), s))
# if the word is splittable
splitting(p::ParentStages, t::WordNormalizer, x::WordStage) = splitting(p, base(t), updatevalue(normalizer(t), x))
# unsplittable stages passed in directly should also be normalized, except tokens.
wrap(::Nothing, t::WordNormalizer, s::TokenStages) = wrap(nothing, base(t), updatevalue(normalizer(t), s))
wrap(::Nothing, t::WordNormalizer, s::TokenStage) = wrap(nothing, base(t), s)
### lower case
struct LowercaseNormalizer{T<:AbstractTokenization} <: SentenceNormalizer{T}
base::T
end
LowercaseNormalizer() = LowercaseNormalizer(DefaultTokenization())
normalizer(t::LowercaseNormalizer) = lowercase
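# For example (a minimal sketch):
#=
julia> map(String ∘ getvalue, FlatTokenizer(LowercaseNormalizer())(Sentence("Hello World")))
2-element Vector{String}:
 "hello"
 "world"
=#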
### Unicode
include("./unicode.jl")
struct UnicodeNormalizer{T<:AbstractTokenization} <: SentenceNormalizer{T}
base::T
flags::Int
end
UnicodeNormalizer(base::AbstractTokenization, normalform::Symbol) = UnicodeNormalizer(base, _utf8proc_flags(normalform))
UnicodeNormalizer(base::AbstractTokenization; kw...) = UnicodeNormalizer(base, _utf8proc_flags(; kw...))
UnicodeNormalizer(normalform::Symbol) = UnicodeNormalizer(DefaultTokenization(), normalform)
UnicodeNormalizer(; kw...) = UnicodeNormalizer(DefaultTokenization(); kw...)
normalizer(t::UnicodeNormalizer) = Base.Fix2(utf8proc_map, t.flags)
function Base.show(io::IO, t::UnicodeNormalizer)
nfs = (:NFC, :NFD, :NFKC, :NFKD)
idx = findfirst(==(t.flags), map(_utf8proc_flags, nfs))
if isnothing(idx)
print(io, "UnicodeNormalizer(")
show(io, base(t))
_show_utf8proc_flags(io, t.flags)
print(io, ')')
else
name = nfs[idx]
print(io, name, '(')
show(io, base(t))
print(io, ')')
end
end
### replace
struct SentenceReplaceNormalizer{T<:AbstractTokenization, P<:Pair} <: SentenceNormalizer{T}
base::T
pattern::P
end
SentenceReplaceNormalizer(pattern) = SentenceReplaceNormalizer(DefaultTokenization(), pattern)
normalizer(t::SentenceReplaceNormalizer) = Base.Fix2(replace, t.pattern)
struct WordReplaceNormalizer{T<:AbstractTokenization, P<:Pair} <: WordNormalizer{T}
base::T
pattern::P
end
WordReplaceNormalizer(pattern) = WordReplaceNormalizer(DefaultTokenization(), pattern)
normalizer(t::WordReplaceNormalizer) = Base.Fix2(replace, t.pattern)
const ReplaceNormalizer = SentenceReplaceNormalizer
### general function
struct SentenceFuncNormalizer{T<:AbstractTokenization, F} <: SentenceNormalizer{T}
base::T
func::F
end
SentenceFuncNormalizer(func) = SentenceFuncNormalizer(DefaultTokenization(), func)
normalizer(t::SentenceFuncNormalizer) = t.func
struct WordFuncNormalizer{T<:AbstractTokenization, F} <: WordNormalizer{T}
base::T
func::F
end
WordFuncNormalizer(func) = WordFuncNormalizer(DefaultTokenization(), func)
normalizer(t::WordFuncNormalizer) = t.func
### Codemap
include("./codemap.jl")
struct CodeNormalizer{T<:AbstractTokenization, C <: CodeMap} <: WordNormalizer{T}
base::T
codemap::C
end
CodeNormalizer(codemap::CodeMap) = CodeNormalizer(DefaultTokenization(), codemap)
CodeNormalizer(base::AbstractTokenization, code_range, code_ranges...) = CodeNormalizer(base, CodeMap(code_range, code_ranges...))
CodeNormalizer(code_range, code_ranges...) = CodeNormalizer(CodeMap(code_range, code_ranges...))
TextEncodeBase.normalizer(t::CodeNormalizer) = t.codemap
Base.:(==)(a::CodeNormalizer, b::CodeNormalizer) = a.base == b.base && a.codemap == b.codemap
| TextEncodeBase | https://github.com/chengchingwen/TextEncodeBase.jl.git |
|
[
"MIT"
] | 0.8.3 | 66f827fa54c38cb7a7b174d3a580075b10793f5a | code | 493 | using StructWalk
using StructWalk: WalkStyle
struct TokenizerStyle <: WalkStyle end
StructWalk.children(::TokenizerStyle, x) = ()
StructWalk.iscontainer(::TokenizerStyle, x) = false
StructWalk.children(::TokenizerStyle, x::AbstractTokenizer) = StructWalk.children(WalkStyle, x)
StructWalk.children(::TokenizerStyle, x::AbstractTokenization) = StructWalk.children(WalkStyle, x)
Base.replace(f::Function, x::Union{AbstractTokenizer, AbstractTokenization}) = postwalk(f, TokenizerStyle(), x)
| TextEncodeBase | https://github.com/chengchingwen/TextEncodeBase.jl.git |
|
[
"MIT"
] | 0.8.3 | 66f827fa54c38cb7a7b174d3a580075b10793f5a | code | 1296 | struct EachSplitTokenization{S} <: BaseTokenization
splitter::S
end
@static if VERSION < v"1.8"
splitting(t::EachSplitTokenization, s::SentenceStage) = split(getvalue(s), t.splitter; keepempty = false)
else
splitting(t::EachSplitTokenization, s::SentenceStage) = eachsplit(getvalue(s), t.splitter; keepempty = false)
end
struct EachMatchTokenization{P<:AbstractPattern} <: BaseTokenization
pattern::P
EachMatchTokenization(r::Regex) = new{Regex}(Base.compile(r))
EachMatchTokenization(r::AbstractPattern) = new{typeof(r)}(r)
end
EachMatchTokenization(r) = EachMatchTokenization(as_match(r))
splitting(t::EachMatchTokenization, s::SentenceStage) = FindAllIterator(t.pattern, getvalue(s))
struct MatchSplitsTokenization{P <: Union{AbstractPattern, Vector{<:AbstractPattern}}} <: BaseTokenization
pattern::P
MatchSplitsTokenization(r::Regex) = new{Regex}(Base.compile(r))
MatchSplitsTokenization(r::Union{AbstractPattern, Vector{<:AbstractPattern}}) = new{typeof(r)}(r)
end
MatchSplitsTokenization(r::AbstractString) = MatchSplitsTokenization(as_match(r))
MatchSplitsTokenization(r::AbstractVector) = MatchSplitsTokenization(map(as_match, r))
splitting(t::MatchSplitsTokenization, s::SentenceStage) = Iterators.map(last, matchsplits(t.pattern, getvalue(s)))
| TextEncodeBase | https://github.com/chengchingwen/TextEncodeBase.jl.git |
|
[
"MIT"
] | 0.8.3 | 66f827fa54c38cb7a7b174d3a580075b10793f5a | code | 2477 | using DataStructures: MutableLinkedList
struct MixedTokenization{T <:Tuple{AbstractTokenization, Vararg{AbstractTokenization}}} <: AbstractTokenization
ts::T
end
MixedTokenization(t, t2, ts...) = MixedTokenization((t, t2, ts...))
Base.getindex(t::MixedTokenization, i) = t.ts[i]
Base.@kwdef struct WordTokenization{S, T} <: BaseTokenization
split_sentences::S = WordTokenizers.split_sentences
tokenize::T = WordTokenizers.tokenize
end
@inline splitting(t::WordTokenization, d::DocumentStage) = t.split_sentences(getvalue(d))
@inline splitting(t::WordTokenization, s::SentenceStage) = t.tokenize(getvalue(s))
@inline splitting(t::WordTokenization, s::SubSentenceStage) = t.tokenize(getvalue(s))
"tokenizer that return flat array instead of nested array of tokens"
struct FlatTokenizer{T<:AbstractTokenization} <: AbstractTokenizer
tokenization::T
end
FlatTokenizer() = FlatTokenizer(DefaultTokenization())
tokenization(tkr::FlatTokenizer) = tkr.tokenization
@inline tokenize(tkr::FlatTokenizer, s::ParentStages, t::AbstractTokenization, x::TokenStages) = tokenize_procedure(tkr, s, t, x)
@inline tokenize(tkr::FlatTokenizer, s::ParentStages, t::AbstractTokenization, x::TokenStage) = isempty(getvalue(x)) ? TokenStage[] : TokenStage[wrap(tkr, s, t, x)]
"tokenizer that return nested array instead of flat array of tokens"
struct NestedTokenizer{T<:AbstractTokenization} <: AbstractTokenizer
tokenization::T
end
NestedTokenizer() = NestedTokenizer(DefaultTokenization())
tokenization(tkr::NestedTokenizer) = tkr.tokenization
@inline tokenize(tkr::NestedTokenizer, p::ParentStages, t::AbstractTokenization, x::TokenStages) = collect(tokenize_procedure!(push!, MutableLinkedList{Vector{Vector}}(), tkr, p, t, x))
@inline tokenize(tkr::NestedTokenizer, p::ParentStages, t::AbstractTokenization, x::DocumentStage) = collect(tokenize_procedure!(push!, MutableLinkedList{Vector{TokenStage}}(), tkr, p, t, x))
@inline tokenize(tkr::NestedTokenizer, p::ParentStages, t::AbstractTokenization, x::Union{SentenceStage, SubSentenceStage, WordStage, SubWordStage}) = collect(tokenize_procedure!(append!, MutableLinkedList{TokenStage}(), tkr, p, t, x))
@inline tokenize(tkr::NestedTokenizer, ::Nothing, t::AbstractTokenization, x::SentenceStage) = [tokenize_procedure(tkr, nothing, t, x)]
@inline tokenize(tkr::NestedTokenizer, p::ParentStages, t::AbstractTokenization, x::TokenStage) = isempty(getvalue(x)) ? TokenStage[] : TokenStage[wrap(tkr, p, t, x)]
| TextEncodeBase | https://github.com/chengchingwen/TextEncodeBase.jl.git |
|
[
"MIT"
] | 0.8.3 | 66f827fa54c38cb7a7b174d3a580075b10793f5a | code | 3141 | using Unicode: normalize
using Base.Unicode: utf8proc_map, UTF8PROC_STABLE, UTF8PROC_COMPAT, UTF8PROC_COMPOSE,
UTF8PROC_DECOMPOSE, UTF8PROC_IGNORE, UTF8PROC_REJECTNA, UTF8PROC_NLF2LS, UTF8PROC_NLF2PS,
UTF8PROC_NLF2LF, UTF8PROC_STRIPCC, UTF8PROC_CASEFOLD, UTF8PROC_CHARBOUND, UTF8PROC_LUMP,
UTF8PROC_STRIPMARK
_utf8proc_flags(nf::Symbol) = if nf === :NFC
return UTF8PROC_STABLE | UTF8PROC_COMPOSE
elseif nf === :NFD
return UTF8PROC_STABLE | UTF8PROC_DECOMPOSE
elseif nf === :NFKC
return UTF8PROC_STABLE | UTF8PROC_COMPOSE | UTF8PROC_COMPAT
elseif nf === :NFKD
return UTF8PROC_STABLE | UTF8PROC_DECOMPOSE | UTF8PROC_COMPAT
else
throw(ArgumentError(":$nf is not one of :NFC, :NFD, :NFKC, :NFKD"))
end
function _utf8proc_flags(;
stable::Bool=false,
compat::Bool=false,
compose::Bool=true,
decompose::Bool=false,
stripignore::Bool=false,
rejectna::Bool=false,
newline2ls::Bool=false,
newline2ps::Bool=false,
newline2lf::Bool=false,
stripcc::Bool=false,
casefold::Bool=false,
lump::Bool=false,
stripmark::Bool=false,
)
flags = 0
stable && (flags = flags | UTF8PROC_STABLE)
compat && (flags = flags | UTF8PROC_COMPAT)
if decompose
flags = flags | UTF8PROC_DECOMPOSE
elseif compose
flags = flags | UTF8PROC_COMPOSE
elseif compat || stripmark
throw(ArgumentError("compat=true or stripmark=true require compose=true or decompose=true"))
end
stripignore && (flags = flags | UTF8PROC_IGNORE)
rejectna && (flags = flags | UTF8PROC_REJECTNA)
newline2ls + newline2ps + newline2lf > 1 && throw(ArgumentError("only one newline conversion may be specified"))
newline2ls && (flags = flags | UTF8PROC_NLF2LS)
newline2ps && (flags = flags | UTF8PROC_NLF2PS)
newline2lf && (flags = flags | UTF8PROC_NLF2LF)
stripcc && (flags = flags | UTF8PROC_STRIPCC)
casefold && (flags = flags | UTF8PROC_CASEFOLD)
lump && (flags = flags | UTF8PROC_LUMP)
stripmark && (flags = flags | UTF8PROC_STRIPMARK)
return flags
end
function _show_utf8proc_flags(io::IO, flags)
(flags & UTF8PROC_STABLE > 0) &&
print(io, ", stable = true")
(flags & UTF8PROC_COMPAT > 0) &&
print(io, ", compat = true")
(flags & UTF8PROC_DECOMPOSE > 0) &&
print(io, ", decompose = true")
(flags & UTF8PROC_COMPOSE > 0) &&
print(io, ", compose = true")
(flags & UTF8PROC_IGNORE > 0) &&
print(io, ", stripignore = true")
(flags & UTF8PROC_REJECTNA > 0) &&
print(io, ", rejectna = true")
(flags & UTF8PROC_NLF2LS > 0) &&
print(io, ", newline2ls = true")
(flags & UTF8PROC_NLF2PS > 0) &&
print(io, ", newline2ps = true")
(flags & UTF8PROC_NLF2LF > 0) &&
print(io, ", newline2lf = true")
(flags & UTF8PROC_STRIPCC > 0) &&
print(io, ", stripcc = true")
(flags & UTF8PROC_CASEFOLD > 0) &&
print(io, ", casefold = true")
(flags & UTF8PROC_LUMP > 0) &&
print(io, ", lump = true")
(flags & UTF8PROC_STRIPMARK > 0) &&
print(io, ", stripmark = true")
end
| TextEncodeBase | https://github.com/chengchingwen/TextEncodeBase.jl.git |
|
[
"MIT"
] | 0.8.3 | 66f827fa54c38cb7a7b174d3a580075b10793f5a | code | 41256 | using DataStructures: MutableLinkedList
using FuncPipelines: FixRest
using RustRegex
using Base.PCRE
isnestedconcretetype(_) = true
@generated function isnestedconcretetype(::Type{T}) where T
return isconcretetype(T) && all(isnestedconcretetype, T.parameters)
end
# match utils
literal_match_regex(s::Union{AbstractString, AbstractChar}, flags...) = Regex(Base.wrap_string(s, UInt32(0)), flags...)
as_match(r::AbstractPattern) = r
as_match(s::Union{AbstractString, AbstractChar}) = literal_match_regex(s)
abstract type AbstractMatchSplitIterState{P <: AbstractPattern} end
mutable struct MatchSplitIterRegexState <: AbstractMatchSplitIterState{Regex}
i::Int
matched::UnitRange{Int}
data::Ptr{Nothing}
function MatchSplitIterRegexState(regex::Regex, i = 1)
Base.compile(regex)
data = PCRE.create_match_data(regex.regex)
state = new(i, 0:0, data)
finalizer(state) do s
s.data == C_NULL || PCRE.free_match_data(s.data)
end
return state
end
end
function matchsplit_iterate!(regex::Regex, e, s, state::MatchSplitIterRegexState)
i, matched, data = state.i, state.matched, state.data
_regex = regex.regex
opts = regex.match_options
if !iszero(matched)
str = @inbounds SubString(s, matched.start, matched.stop)
state.matched = 0:0
return (true, str), state
end
if i > e
return nothing
end
if !PCRE.exec(_regex, s, i-1, opts, data)
str = @inbounds SubString(s, i, e)
state.i = typemax(Int)
return (false, str), state
end
p = PCRE.ovec_ptr(data)
ri = Int(unsafe_load(p, 1)) + 1
re = prevind(s, Int(unsafe_load(p, 2)) + 1)
matched = ri:re
ni = nextind(s, re)
if i != ri
str = @inbounds SubString(s, i, prevind(s, ri))
state.i = ni
state.matched = matched
return (false, str), state
end
str = @inbounds SubString(s, ri, re)
state.i = ni
state.matched = 0:0
return (true, str), state
end
mutable struct MatchSplitIterRuRegexState <: AbstractMatchSplitIterState{RuRegex}
i::Int
matched::UnitRange{Int}
itr::Ptr{Cvoid}
function MatchSplitIterRuRegexState(regex::RuRegex, i = 1)
obj = RustRegex.RuRE.rure_iter_new(regex)
state = new(i, 0:0, obj)
finalizer(state) do x
x.itr == C_NULL || RustRegex.RuRE.rure_iter_free(x.itr)
end
return state
end
end
function matchsplit_iterate!(regex::RuRegex, e, s, state::MatchSplitIterRuRegexState)
i, matched, itr = state.i, state.matched, state.itr
if !iszero(matched)
str = @inbounds SubString(s, matched.start, matched.stop)
state.matched = 0:0
return (true, str), state
end
if i > e
return nothing
end
m = Ref{UnitRange{UInt}}(0:0)
len = ncodeunits(s)
if !RustRegex.RuRE.rure_iter_next(itr, s, len, m)
str = @inbounds SubString(s, i, e)
state.i = typemax(Int)
return (false, str), state
end
_m = m[]
ri = thisind(s, Int(_m.start) + 1)
re = thisind(s, Int(_m.stop))
matched = ri:re
ni = nextind(s, re)
if i != ri
str = @inbounds SubString(s, i, prevind(s, ri))
state.i = ni
state.matched = matched
return (false, str), state
end
str = @inbounds SubString(s, ri, re)
state.i = ni
state.matched = 0:0
return (true, str), state
end
struct MatchSplitIterPatternAndState{P <: AbstractPattern, S <: AbstractMatchSplitIterState{P}}
pattern::P
state::S
end
MatchSplitIterPatternAndState(pattern::AbstractPattern, str::SubString{String}) =
MatchSplitIterPatternAndState(pattern, firstindex(str))
MatchSplitIterPatternAndState(regex::Regex, i::Int) = MatchSplitIterPatternAndState(regex, MatchSplitIterRegexState(regex, i))
MatchSplitIterPatternAndState(regex::RuRegex, i::Int) = MatchSplitIterPatternAndState(regex, MatchSplitIterRuRegexState(regex, i))
struct MatchSplitIterator{P<:MatchSplitIterPatternAndState}
regex_and_state::P
lastidx::Int
str::SubString{String}
function MatchSplitIterator(regex::AbstractPattern, str::SubString{String})
regex_and_state = MatchSplitIterPatternAndState(regex, str)
return new{typeof(regex_and_state)}(regex_and_state, lastindex(str), str)
end
end
MatchSplitIterator(regex::AbstractPattern, str::String) = MatchSplitIterator(regex, SubString(str))
MatchSplitIterator(regex, str) = MatchSplitIterator(literal_match_regex(regex), str)
Base.eltype(::Type{<:MatchSplitIterator}) = Tuple{Bool, SubString{String}}
Base.IteratorSize(::Type{<:MatchSplitIterator}) = Base.SizeUnknown()
Base.show(io::IO, itr::MatchSplitIterator) = (print(io, "MatchSplitIterator("); show(io, itr.regex_and_state.pattern); print(io, ", "); show(io, itr.str); print(io, ')'))
function Base.iterate(itr::MatchSplitIterator, _ = nothing)
regex_and_state = itr.regex_and_state
state = regex_and_state.state
e = itr.lastidx
v_state = matchsplit_iterate!(regex_and_state.pattern, e, itr.str, state)
isnothing(v_state) && return nothing
v = first(v_state)
return v, nothing
end
struct MatchSplits{P <: AbstractPattern, I <: MatchSplitIterator}
regexes::Vector{P}
str::SubString{String}
states::MutableLinkedList{I}
function MatchSplits(regexes::Vector{P}, str::SubString{String}) where P <:AbstractPattern
n = length(regexes)
@assert n != 0
itr1 = MatchSplitIterator(@inbounds(regexes[1]), str)
if P == AbstractPattern
states = MutableLinkedList{MatchSplitIterator}(itr1)
else
states = MutableLinkedList{typeof(itr1)}(itr1)
end
return new{P, eltype(states)}(regexes, str, states)
end
end
MatchSplits(regexes::Vector{<:AbstractPattern}, str::String) = MatchSplits(regexes, SubString(str))
MatchSplits(regexes, str) = MatchSplits(map(as_match, regexes), str)
Base.eltype(::Type{<:MatchSplits}) = Tuple{Bool, SubString{String}}
Base.IteratorSize(::Type{<:MatchSplits}) = Base.SizeUnknown()
Base.show(io::IO, itr::MatchSplits) = (print(io, "MatchSplits("); show(io, itr.regexes); print(io, ", "); show(io, itr.str); print(io, ')'))
function Base.iterate(itr::MatchSplits, _ = nothing)
state = itr.states
@label ms_itr_start
level = length(state)
iszero(level) && return nothing
itr_i = @inbounds state[level]
I = Base.iterate(itr_i)
if isnothing(I)
pop!(state)
@goto ms_itr_start
end
v, _ = I
ismatch = v[1]
if ismatch
return v, nothing
else
if level == length(itr.regexes)
return v, nothing
else
regex_j = @inbounds itr.regexes[level+1]
itr_j = MatchSplitIterator(regex_j, v[2])
push!(state, itr_j)
@goto ms_itr_start
end
end
end
matchsplit(t, s) = MatchSplitIterator(t, s)
matchsplits(t::AbstractPattern, s) = matchsplit(t, s)
matchsplits(t::Vector{<:AbstractPattern}, s) = isone(length(t)) ? matchsplits(@inbounds(t[1]), s) : MatchSplits(t, s)
"""
matchsplits(pattern::AbstractPattern, str::String)
Split `str` with the regular expression `pattern`. Return a lazy iterator where each element
is a `Tuple{Bool, SubString}`. The `Bool` indicates whether the `SubString` is a match of `pattern`.
# Example
```julia-repl
julia> matchsplits(r"a|c", "abc"^3)
MatchSplitIterator(r"a|c", "abcabcabc")
julia> collect(matchsplits(r"a|c", "abc"^3))
9-element Vector{Tuple{Bool, SubString{String}}}:
(1, "a")
(0, "b")
(1, "c")
(1, "a")
(0, "b")
(1, "c")
(1, "a")
(0, "b")
(1, "c")
```
"""
matchsplits(t::AbstractPattern, s)
"""
matchsplits(patterns::Vector{<:AbstractPattern}, str::String)
Split `str` with the list of regular expressions `patterns`. Return a lazy iterator where each
element is a `Tuple{Bool, SubString}`. The `Bool` indicates whether the `SubString` is a match of one of the `patterns`.
The match order is specified by the list order.
# Example
```julia-repl
julia> matchsplits([r"a", r"c"], "abc"^3)
MatchSplits(Regex[r"a", r"c"], "abcabcabc")
julia> collect(matchsplits([r"a", r"c"], "abc"^3))
9-element Vector{Tuple{Bool, SubString{String}}}:
(1, "a")
(0, "b")
(1, "c")
(1, "a")
(0, "b")
(1, "c")
(1, "a")
(0, "b")
(1, "c")
julia> collect(matchsplits([r"ab", r"bc"], "abc"^3))
6-element Vector{Tuple{Bool, SubString{String}}}:
(1, "ab")
(0, "c")
(1, "ab")
(0, "c")
(1, "ab")
(0, "c")
```
"""
matchsplits(t::Vector{<:AbstractPattern}, s)
struct FindAllIterator{S, P}
pattern::P
str::S
end
Base.eltype(::Type{<:FindAllIterator{<:Union{String, SubString}}}) = SubString{String}
Base.eltype(::Type{<:FindAllIterator{<:AbstractString}}) = String
Base.IteratorSize(::Type{<:FindAllIterator}) = Base.SizeUnknown()
function Base.iterate(itr::FindAllIterator, state = firstindex(itr.str))
str = itr.str
found = findnext(itr.pattern, str, state)
isnothing(found) && return nothing
result = @inbounds eltype(itr) <: SubString ? @view(str[found]) : str[found]
nstate = nextind(str, last(found))
return result, nstate
end
# misc
nestedcall(f) = Base.Fix1(nestedcall, f)
nestedcall(f, x::AbstractArray) = map(nestedcall(f), x)
nestedcall(f, x) = f(x)
function _nestedcall_f!(f, ys, xs)
@inbounds for i in eachindex(xs, ys)
ys[i] = nestedcall(f, xs[i])
end
return ys
end
function _nestedcall_f_fallback!(f, ys, xs)
S = Union{}
@inbounds for i in eachindex(xs, ys)
ys[i] = nestedcall(f, xs[i])
S = promote_type(S, typeof(ys[i]))
end
return S, ys
end
function nestedcall(f, xs::Array)
R = Core.Compiler.return_type(nestedcall, Tuple{typeof(f), eltype(xs)})
if Base.isconcretetype(R)
return _nestedcall_f!(f, similar(xs, R), xs)
else
S, ys = _nestedcall_f_fallback!(f, similar(xs, R), xs)
if S != R
zs = similar(xs, S)
copyto!(zs, ys)
return zs
end
return ys
end
end
# encode utils
const NotASample = -2
const UnknownSample = -1
const SampleElement = 0
const SingleSample = 1
const ArraySample = 2
const NestedSample = 3
_sequence_of(x) = x + (x >= 0)
"""
type_sequence_sample_type([T::Type,] t::Type)
Get the depth of a nested array type. If a natural number is returned, `t` is a nested array type.
Return `-1` if the depth cannot be determined from the type alone and `-2` if `t` is not a nested array type.
Specify `T` to check whether `t` is a nested array type with element type `T`.
If `T` is not specified, every type that is not a subtype of `AbstractArray` counts as an element type.
see also: [`sequence_sample_type`](@ref), [`peek_sequence_sample_type`](@ref)
# Example
```julia-repl
julia> type_sequence_sample_type(Vector{Vector{Integer}})
2
julia> type_sequence_sample_type(Number, Array{Vector{Union{Float64, Int}}})
2
julia> type_sequence_sample_type(Int, Array{Vector{Union{Float64, Int}}})
-2
```
"""
function type_sequence_sample_type(@nospecialize(T::Type), @nospecialize(t::Type))
t <: T && return SampleElement
if t isa Union
st_a = type_sequence_sample_type(T, t.a)
st_b = type_sequence_sample_type(T, t.b)
return st_a == st_b ? st_a : NotASample
end
t <: AbstractArray || return NotASample
et = t >: AbstractArray ? Base.unwrap_unionall(t).parameters[1] : eltype(t)
if et isa DataType || et isa UnionAll
if et <: T
return SingleSample
elseif et <: AbstractArray
st = type_sequence_sample_type(T, et)
return _sequence_of(st)
elseif et >: AbstractArray
return UnknownSample
else
return NotASample
end
elseif et isa Union
st = type_sequence_sample_type(T, et)
return _sequence_of(st)
end
return UnknownSample
end
function type_sequence_sample_type(@nospecialize(t::Type))
if t isa Union
st_a = type_sequence_sample_type(t.a)
st_b = type_sequence_sample_type(t.b)
return st_a == st_b ? st_a : NotASample
end
t <: AbstractArray || return SampleElement
et = t >: AbstractArray ? Base.unwrap_unionall(t).parameters[1] : eltype(t)
if et isa DataType || et isa UnionAll
if et <: AbstractArray
st = type_sequence_sample_type(et)
return _sequence_of(st)
elseif et >: AbstractArray
return UnknownSample
else
return SingleSample
end
elseif et isa Union
st = type_sequence_sample_type(et)
return _sequence_of(st)
end
return UnknownSample
end
"""
sequence_sample_type([T::Type,] x)
Get the depth of a nested array. A natural number return value means `x` is a nested array where every element has
the same depth. Return `-2` if `x` is not a nested array or if the depths of its elements differ. The depth of an
empty array is computed from its type, so `sequence_sample_type(Any[])` is `1`. Specify `T` to check that `x` is a
nested array with element type `T`. If `T` is not specified, every type that is not a subtype of `AbstractArray`
counts as an element type.
see also: [`type_sequence_sample_type`](@ref), [`peek_sequence_sample_type`](@ref)
# Example
```julia-repl
julia> sequence_sample_type([[1,2,3]])
2
julia> sequence_sample_type([[[2,3], [1]], Vector{Int}[]])
3
julia> sequence_sample_type([[[2,3], [1]], Any[]])
-2
julia> sequence_sample_type(Int, [[1,2], 3])
-2
julia> sequence_sample_type(Int, Any[[1,2], Int[]])
2
```
"""
function sequence_sample_type(x)
S = typeof(x)
stype = type_sequence_sample_type(S)
if stype == UnknownSample
itr = iterate(x)
if !isnothing(itr)
xi, state = itr
elst = sequence_sample_type(xi)
elst == NotASample && return NotASample
itr = iterate(x, state)
while !isnothing(itr)
xi, state = itr
elst2 = sequence_sample_type(xi)
elst != elst2 && return NotASample
itr = iterate(x, state)
end
return _sequence_of(elst)
end
ET = eltype(S)
return ET <: AbstractArray || ET != Any ? ArraySample : SingleSample
end
return stype
end
function sequence_sample_type(T::Type, x)
S = typeof(x)
stype = type_sequence_sample_type(T, S)
if stype == UnknownSample
itr = iterate(x)
if !isnothing(itr)
xi, state = itr
elst = sequence_sample_type(T, xi)
elst == NotASample && return NotASample
itr = iterate(x, state)
while !isnothing(itr)
xi, state = itr
elst2 = sequence_sample_type(T, xi)
elst != elst2 && return NotASample
itr = iterate(x, state)
end
return _sequence_of(elst)
end
ET = eltype(S)
return ET <: AbstractArray || ET != Any ? ArraySample : SingleSample
end
return stype
end
"""
peek_sequence_sample_type([T::Type,] x)
Non-recursive version of `sequence_sample_type`. Return `-1` if `x` is an array of arrays with unknown elements,
so it is still possible that `sequence_sample_type(x[i]) == -2`. Specify `T` to check that `x` is a nested array with
element type `T`. If `T` is not specified, every type that is not a subtype of `AbstractArray` counts as an element type.
see also: [`type_sequence_sample_type`](@ref), [`sequence_sample_type`](@ref)
# Example
```julia-repl
julia> TextEncodeBase.peek_sequence_sample_type([1,2,3])
1
julia> peek_sequence_sample_type(Int, Any[[[1,2,3]]]), sequence_sample_type(Int, Any[[[1,2,3]]])
(-1, 3)
julia> peek_sequence_sample_type(Int, [[[1,2,3], "abc"]]), sequence_sample_type(Int, [[[1,2,3], "abc"]])
(-1, -2)
```
"""
function peek_sequence_sample_type(x)
S = typeof(x)
stype = type_sequence_sample_type(S)
if stype == UnknownSample
itr = iterate(x)
if !isnothing(itr)
xi, state = itr
elst = xi isa AbstractArray ? SingleSample : SampleElement
itr = iterate(x, state)
while !isnothing(itr)
xi, state = itr
elst2 = xi isa AbstractArray ? SingleSample : SampleElement
elst != elst2 && return NotASample
itr = iterate(x, state)
end
return elst == SampleElement ? SingleSample : UnknownSample
end
ET = eltype(S)
return ET <: AbstractArray || ET != Any ? ArraySample : SingleSample
end
return stype
end
function peek_sequence_sample_type(T::Type, x)
S = typeof(x)
stype = type_sequence_sample_type(T, S)
if stype == UnknownSample
itr = iterate(x)
if !isnothing(itr)
xi, state = itr
elst = xi isa AbstractArray ? SingleSample : xi isa T ? SampleElement : NotASample
elst == NotASample && return NotASample
itr = iterate(x, state)
while !isnothing(itr)
xi, state = itr
elst2 = xi isa AbstractArray ? SingleSample : xi isa T ? SampleElement : NotASample
elst != elst2 && return NotASample
itr = iterate(x, state)
end
return elst == SampleElement ? SingleSample : UnknownSample
end
ET = eltype(S)
return ET <: AbstractArray || ET != Any ? ArraySample : SingleSample
end
return stype
end
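# `@elementmap sym f(args...)` turns the call into an eagerly allocated `map!` over `sym`;
# roughly (a sketch of the expansion):
#   @elementmap x g(x, y)  ==>  map!(x_i -> g(x_i, y), Array{T, ndims(x)}(undef, size(x)), x)
# where `T` is the inferred return type of `g` applied to the element type of `x`.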
macro elementmap(sym::Symbol, ex::Expr)
!Meta.isexpr(ex, :call) && error("not a function call: $ex")
func = ex.args[1]
has_x = false
argtype = Expr(:curly, :Tuple)
x_i = Symbol("#", sym, :_i)
fcall = Expr(:call, func)
for i = 2:length(ex.args)
argi = ex.args[i]
if argi == sym
has_x = true
push!(argtype.args, :(eltype($argi)))
push!(fcall.args, x_i)
else
push!(argtype.args, :(typeof($argi)))
push!(fcall.args, argi)
end
end
!has_x && error("no $sym in function call")
f = Expr(:->, x_i, Expr(:block, fcall))
ET = Expr(:call, :(Core.Compiler.return_type), func, argtype)
RT = Expr(:curly, Array, ET, Expr(:call, :ndims, sym))
y = Expr(:call, RT, :undef, Expr(:call, :size, sym))
r = Expr(:call, :map!, f, y, sym)
return esc(r)
end
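# `allany(f, x)` computes `(all(f, x), any(f, x))` in a single pass, e.g.
#   allany(iseven, [2, 4, 5]) == (false, true)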
allany(f, x) = mapfoldl(f, _allany, x; init=(true, false))
_allany(a, b) = a[1] & b, a[2] | b
"""
with_head_tail(x, head, tail)
Return `[head; x; tail]`. `head` or `tail` is ignored if it is `nothing`. `x` can be a nested array.
# Example
```julia
julia> TextEncodeBase.with_head_tail(1:5, -1, -2)
7-element Vector{Int64}:
-1
1
2
3
4
5
-2
julia> TextEncodeBase.with_head_tail([1:5, 2:3], -1, -2)
2-element Vector{Vector{Int64}}:
[-1, 1, 2, 3, 4, 5, -2]
[-1, 2, 3, -2]
```
"""
function with_head_tail(x::AbstractArray, head, tail)
stype = peek_sequence_sample_type(x)
if stype == SingleSample
T = eltype(x)
if T == Any
return _with_head_tail(mapreduce(typeof, promote_type, x), x, head, tail)
else
return _with_head_tail(x, head, tail)
end
elseif stype >= UnknownSample
return @elementmap x with_head_tail(x, head, tail)
# return map(FixRest(with_head_tail, head, tail), x)
else
error("Input array is mixing array and non-array elements")
end
end
with_head_tail(head, tail) = FixRest(with_head_tail, head, tail)
with_head_tail(x; head=nothing, tail=nothing) = with_head_tail(x, head, tail)
with_head_tail(; head=nothing, tail=nothing) = with_head_tail(head, tail)
@inline function _with_head_tail(::Type{T}, x, head, tail) where T
S = T
n = length(x)
!isnothing(head) && ((n, S) = (n+1, promote_type(S, typeof(head))))
!isnothing(tail) && ((n, S) = (n+1, promote_type(S, typeof(tail))))
vec = Vector{S}(undef, n); empty!(vec)
!isnothing(head) && push!(vec, head)
append!(vec, x)
!isnothing(tail) && push!(vec, tail)
return vec
end
_with_head_tail(x, head, tail) = _with_head_tail(eltype(x), x, head, tail)
"""
trunc_or_pad(x, n, pad)
Truncate `x` to length `n`, or append `pad` to the end of `x` until its length equals `n`.
`x` can be either a nested or a flat array. If `n` is `nothing`, the largest length of
the inner-most arrays is used.
trunc_or_pad(x, n, pad, trunc_end = :tail, pad_end = :tail)
`trunc_end` and `pad_end` specify whether truncation and padding happen at the beginning or the end of the
sequence. The value is either `:tail` (the end) or `:head` (the beginning).
trunc_or_pad(n, pad, trunc_end = :tail, pad_end = :tail)
Create a function that returns a new array with the truncated or padded values of its input.
see also: [`trunc_and_pad`](@ref)
# Example
```julia
julia> TextEncodeBase.trunc_or_pad(1:5, 7, -1)
7-element Vector{Int64}:
1
2
3
4
5
-1
-1
julia> TextEncodeBase.trunc_or_pad([1:5, 2:7], 10, -1)
2-element Vector{Vector{Int64}}:
[1, 2, 3, 4, 5, -1, -1, -1, -1, -1]
[2, 3, 4, 5, 6, 7, -1, -1, -1, -1]
julia> TextEncodeBase.trunc_or_pad([1:5, [2:7, [1:2]]], nothing, -1)
2-element Vector{Vector}:
[1, 2, 3, 4, 5, -1]
Vector[[2, 3, 4, 5, 6, 7], [[1, 2, -1, -1, -1, -1]]]
```
"""
function trunc_or_pad(x::AbstractArray, n::Integer, pad, trunc_end::Symbol = :tail, pad_end::Symbol = :tail)
stype = peek_sequence_sample_type(x)
if stype == SingleSample
return trunc_or_pad!(similar(x, n), x, n, pad, trunc_end, pad_end)
elseif stype >= UnknownSample
return @elementmap x trunc_or_pad(x, n, pad, trunc_end, pad_end)
# return map(trunc_or_pad(n, pad, trunc_end, pad_end), x)
else
error("Input array is mixing array and non-array elements")
end
end
trunc_or_pad(x, ::Nothing, pad, trunc_end::Symbol = :tail, pad_end::Symbol = :tail) =
trunc_or_pad(x, nestedmaxlength(x), pad, trunc_end, pad_end)
trunc_or_pad(n, pad, trunc_end::Symbol = :tail, pad_end::Symbol = :tail) =
FixRest(trunc_or_pad, n, pad, trunc_end, pad_end)
trunc_or_pad(x; n = nothing, pad, trunc_end::Symbol = :tail, pad_end::Symbol = :tail) =
trunc_or_pad(x, n, pad, trunc_end, pad_end)
trunc_or_pad(; n = nothing, pad, trunc_end::Symbol = :tail, pad_end::Symbol = :tail) =
trunc_or_pad(n, pad, trunc_end, pad_end)
function trunc_or_pad!(vec, x, n, pad, trunc_end, pad_end)
len = length(x)
if len <= n # pad
if pad_end == :tail
copyto!(vec, x)
vec[len+1:n] .= pad
elseif pad_end == :head
pad_prefix_size = n - len
copyto!(vec, pad_prefix_size + 1, x, 1, len)
vec[1:pad_prefix_size] .= pad
else
error("`pad_end` is not :head or :tail but: $pad_end")
end
else # trunc
if trunc_end == :tail
copyto!(vec, 1, x, 1, n)
elseif trunc_end == :head
copyto!(vec, 1, x, len - n + 1, n)
else
error("`trunc_end` is not :head or :tail but: $trunc_end")
end
end
return vec
end
"""
trunc_and_pad(x, maxn, pad)
Truncate `x` if its length exceeds `maxn`, and append `pad` to the end of `x` until all lengths are the same.
`x` can be either a nested or a flat array. If `maxn` is `nothing`, the largest length of
the inner-most arrays is used, in which case the behavior equals `trunc_or_pad` with `nothing`.
trunc_and_pad(x, maxn, pad, trunc_end = :tail, pad_end = :tail)
`trunc_end` and `pad_end` specify whether truncation and padding happen at the beginning or the end of the
sequence. The value is either `:tail` (the end) or `:head` (the beginning).
trunc_and_pad(maxn, pad, trunc_end = :tail, pad_end = :tail)
Create a function that truncates its input to length <= `maxn` and adds `pad` until all inputs have equal length.
see also: [`trunc_or_pad`](@ref)
# Example
```julia
julia> TextEncodeBase.trunc_and_pad(1:5, 7, -1)
5-element Vector{Int64}:
1
2
3
4
5
julia> TextEncodeBase.trunc_and_pad([1:5, 2:7], 10, -1)
2-element Vector{Vector{Int64}}:
[1, 2, 3, 4, 5, -1]
[2, 3, 4, 5, 6, 7]
julia> TextEncodeBase.trunc_and_pad([1:5, [2:7, [1:2]]], nothing, -1)
2-element Vector{Vector}:
[1, 2, 3, 4, 5, -1]
Vector[[2, 3, 4, 5, 6, 7], [[1, 2, -1, -1, -1, -1]]]
```
"""
trunc_and_pad(x, maxn, pad, trunc_end::Symbol = :tail, pad_end::Symbol = :tail) =
(n = nestedmaxlength(x); _trunc_and_pad(x, n, isnothing(maxn) ? n : maxn, pad, trunc_end, pad_end))
trunc_and_pad(maxn, pad, trunc_end::Symbol = :tail, pad_end::Symbol = :tail) =
FixRest(trunc_and_pad, maxn, pad, trunc_end, pad_end)
trunc_and_pad(x; maxn=nothing, pad, trunc_end::Symbol = :tail, pad_end::Symbol = :tail) =
trunc_and_pad(x, maxn, pad, trunc_end, pad_end)
trunc_and_pad(; maxn=nothing, pad, trunc_end::Symbol = :tail, pad_end::Symbol = :tail) =
trunc_and_pad(maxn, pad, trunc_end, pad_end)
@inline _trunc_and_pad(x, n, maxn, pad, trunc_end, pad_end) = trunc_or_pad(x, min(n, maxn), pad, trunc_end, pad_end)
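# Maximum length over the inner-most arrays, e.g. nestedmaxlength([[1, 2, 3], [4]]) == 3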
function nestedmaxlength(x::AbstractArray)
stype = peek_sequence_sample_type(x)
if stype == SingleSample
return length(x)
elseif stype >= UnknownSample
return mapfoldl(nestedmaxlength, max, x)
else
error("Input array is mixing array and non-array elements")
end
end
_checkeqsize(x, y) = x == y ? x : throw(DimensionMismatch("nested size not the same: $x != $y"))
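# Size of the batched equivalent of a nested array: inner-most dims first, outer dims last,
# e.g. nestedsize([[1, 2], [3, 4]]) == (2, 2)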
function nestedsize(x::AbstractArray)
stype = peek_sequence_sample_type(x)
if stype == SingleSample
return size(x)
elseif stype >= UnknownSample
s1 = nestedsize(first(x))
mapfoldl(nestedsize, _checkeqsize, @view(reshape(x, :)[2:end]); init = s1)
return (s1..., size(x)...)
else
error("Input array is mixing array and non-array elements")
end
end
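# Promoted element type over all leaves, e.g. nestedtype(Any[[1, 2], [3.0]]) == Float64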
function nestedtype(x::AbstractArray)
stype = peek_sequence_sample_type(x)
if stype == SingleSample
return mapreduce(typeof, promote_type, x)
elseif stype >= UnknownSample
return mapreduce(nestedtype, promote_type, x)
else
error("Input array is mixing array and non-array elements")
end
end
"""
nested2batch(x)
Convert a nested array into a single array.
See also: [`batch2nested`](@ref)
# Example
```julia
julia> TextEncodeBase.nested2batch([[[1 2],[3 4]]])
1×2×2×1 Array{Int64, 4}:
[:, :, 1, 1] =
1 2
[:, :, 2, 1] =
3 4
```
"""
function nested2batch(x)
ns = nestedsize(x)
arr = Array{nestedtype(x), length(ns)}(undef, ns)
_nested2batch!(arr, 1, x)
return arr
end
_reduce_nested(dst_offset, xi) = dst_offset[1], _nested2batch!(dst_offset..., xi)[2]
function _nested2batch!(arr, offset, x::AbstractArray)
stype = peek_sequence_sample_type(x)
if stype == SingleSample
copyto!(arr, offset, x, 1, length(x))
return (arr, offset+length(x))
elseif stype >= UnknownSample
return foldl(_reduce_nested, x; init=(arr, offset))
else
error("Input array is mixing array and non-array elements")
end
end
"""
batch2nested(x)
Convert a single array into a nested array.
See also: [`nested2batch`](@ref)
# Example
```julia-repl
julia> x = ["a" "d"; "b" "e"; "c" "f";;; "x" "u"; "y" "v"; "z" "w"; ]
3×2×2 Array{String, 3}:
[:, :, 1] =
"a" "d"
"b" "e"
"c" "f"
[:, :, 2] =
"x" "u"
"y" "v"
"z" "w"
julia> TextEncodeBase.batch2nested(x)
2-element Vector{Vector{Vector{String}}}:
[["a", "b", "c"], ["d", "e", "f"]]
[["x", "y", "z"], ["u", "v", "w"]]
```
"""
function batch2nested(x::AbstractArray)
return _batch2nested(x, size(x))
end
_batch2nested(x, ::Tuple{Int}) = collect(x)
@static if VERSION < v"1.9"
function _batch2nested(x, s::Tuple)
dim = length(s)
len = s[end]
s = Base.front(s)
X = eachslice(x; dims = dim)
y = Vector{Core.Compiler.return_type(_batch2nested, Tuple{eltype(X), typeof(s)})}(undef, len)
@inbounds for (i, xi) in enumerate(X)
y[i] = _batch2nested(xi, s)
end
return y
end
else
function _batch2nested(x, s::Tuple)
dim = length(s)
len = s[end]
s = Base.front(s)
X = eachslice(x; dims = dim)
y = Vector{Core.Compiler.return_type(_batch2nested, Tuple{eltype(X), typeof(s)})}(undef, len)
return map!(xi->_batch2nested(xi, s), y, X)
end
end
"""
join_text(x::AbstractArray [, delim [, last]])
`join` the inner-most arrays and preserve the outer array structure. If the inner-most array is multi-dimensional,
`join` the text along the first dimension.
# Example
```julia-repl
julia> TextEncodeBase.join_text([["a", "b", "c"], ['x', 'y', 'z']])
2-element Vector{String}:
"abc"
"xyz"
julia> TextEncodeBase.join_text([["a", "b", "c"], ['x', 'y', 'z']], " + ")
2-element Vector{String}:
"a + b + c"
"x + y + z"
julia> TextEncodeBase.join_text([[["a", "b", "c"], ['x', 'y', 'z']]], " + ", " = ")
1-element Vector{Vector{String}}:
["a + b = c", "x + y = z"]
julia> TextEncodeBase.join_text(["a" "d"; "b" "e"; "c" "f";;; "x" "u"; "y" "v"; "z" "w"; ], " + ", " = ")
2×2 Matrix{String}:
"a + b = c" "x + y = z"
"d + e = f" "u + v = w"
```
"""
@static if VERSION < v"1.9"
function join_text(x::AbstractArray, delim = "", last = delim)
stype = peek_sequence_sample_type(x)
if stype == SingleSample
N = ndims(x)
if N == 1
return join(x, delim, last)
else
return reshape(mapslices(FixRest(join, delim, last), x; dims = 1), Base.tail(size(x)))
end
elseif stype >= UnknownSample
return @elementmap x join_text(x, delim, last)
else
error("Input array is mixing array and non-array elements")
end
end
else
function join_text(x::AbstractArray, delim = "", last = delim)
stype = peek_sequence_sample_type(x)
if stype == SingleSample
N = ndims(x)
if N == 1
return join(x, delim, last)
else
return map(FixRest(join, delim, last), eachslice(x, dims = ntuple(x->x+1, Val(N - 1))))
end
elseif stype >= UnknownSample
return @elementmap x join_text(x, delim, last)
# return map(FixRest(join_text, delim, last), x)
else
error("Input array is mixing array and non-array elements")
end
end
end
# Sequence template
"""
abstract type TemplateTerm{T} end
Abstract type for terms used in [`SequenceTemplate`](@ref).
"""
abstract type TemplateTerm{T} end
Base.eltype(::TemplateTerm{T}) where T = T
"""
InputTerm{T}(type_id = 1)
A `TemplateTerm` that takes a sequence from the input.
"""
struct InputTerm{T} <: TemplateTerm{T}
type_id::Int
InputTerm{T}(type_id = 1) where T = new{T}(type_id)
end
"""
IndexInputTerm{T}(idx::Int, type_id = 1)
A `TemplateTerm` that takes the `idx`-th sequence of the input. If the `IndexInputTerm` is also the `idx`-th
input-related term in a [`SequenceTemplate`](@ref), it behaves the same as [`InputTerm`](@ref).
"""
struct IndexInputTerm{T} <: TemplateTerm{T}
idx::Int
type_id::Int
IndexInputTerm{T}(idx, type_id = 1) where T = new{T}(idx, type_id)
end
"""
ConstTerm(value::T, type_id = 1)
A `TemplateTerm` that simply puts `value` into the output sequence.
"""
struct ConstTerm{T} <: TemplateTerm{T}
value::T
type_id::Int
end
ConstTerm(value, type_id = 1) = ConstTerm{typeof(value)}(value, type_id)
"""
RepeatedTerm(terms::TemplateTerm...; dynamic_type_id = false)
A special term indicating that the `terms` sequence can appear zero or more times. Cannot be nested.
If `dynamic_type_id` is set, each repetition adds an offset to the type ids of the repeated `terms`.
The offset is the number of preceding repetitions, starting from `0`, times `dynamic_type_id`.
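For example, with `dynamic_type_id = 1` the type id of the repeated term grows by `1` on each repetition:
# Example
```julia-repl
julia> SequenceTemplate(InputTerm{Int}(), RepeatedTerm(InputTerm{Int}(); dynamic_type_id = 1))(1:1, 2:2, 3:3)
([1, 2, 3], [1, 1, 2])
```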
"""
struct RepeatedTerm{T, Ts<:Tuple{Vararg{TemplateTerm{T}}}} <: TemplateTerm{T}
terms::Ts
dynamic_type_id::Int
function RepeatedTerm(terms::Tuple{Vararg{TemplateTerm{T}}}, dynamic_type_id = false) where T
@assert length(terms) >= 1 "No TemplateTerm provided."
@assert !any(Base.Fix2(isa, RepeatedTerm), terms) "Cannot nest RepeatedTerm"
return new{T, typeof(terms)}(terms, dynamic_type_id)
end
end
RepeatedTerm(terms::TemplateTerm...; dynamic_type_id = false) = RepeatedTerm(terms, dynamic_type_id)
"""
SequenceTemplate(terms::TemplateTerm)(sequences...)
Construct a function from multiple `TemplateTerm`s that indicates how to combine the input `sequences`. Return
a tuple of the resulting sequence and a sequence of type ids (a special number associated with each template term).
# Example
```julia-repl
julia> SequenceTemplate(ConstTerm(-1), InputTerm{Int}(), ConstTerm(-2))(1:5)[1] == TextEncodeBase.with_head_tail(1:5, -1, -2)
true
julia> SequenceTemplate(ConstTerm(-1), InputTerm{Int}(), ConstTerm(-2))(1:5)
([-1, 1, 2, 3, 4, 5, -2], [1, 1, 1, 1, 1, 1, 1])
julia> bert_template = SequenceTemplate(
ConstTerm("[CLS]", 1), InputTerm{String}(1), ConstTerm("[SEP]", 1),
RepeatedTerm(InputTerm{String}(2), ConstTerm("[SEP]", 2))
)
SequenceTemplate{String}([CLS]:<type=1> Input:<type=1> [SEP]:<type=1> (Input:<type=2> [SEP]:<type=2>)...)
julia> bert_template(["hello", "world"])
(["[CLS]", "hello", "world", "[SEP]"], [1, 1, 1, 1])
julia> bert_template(["hello", "world"], ["today", "is", "a", "good", "day"])
(["[CLS]", "hello", "world", "[SEP]", "today", "is", "a", "good", "day", "[SEP]"], [1, 1, 1, 1, 2, 2, 2, 2, 2, 2])
```
"""
struct SequenceTemplate{T, Ts<:Tuple{Vararg{TemplateTerm{T}}}} <: Function
terms::Ts
function SequenceTemplate(terms::Tuple{Vararg{TemplateTerm{T}}}) where T
@assert length(terms) >= 1 "No TemplateTerm provided."
@assert count(Base.Fix2(isa, RepeatedTerm), terms) <= 1 "RepeatedTerm can only appear at most once."
return new{T, typeof(terms)}(terms)
end
end
SequenceTemplate(terms::TemplateTerm...) = SequenceTemplate(terms)
Base.eltype(::SequenceTemplate{T}) where T = T
function process_term!(term::InputTerm, output, type_ids, i, j, terms, xs)
    @assert j <= length(xs) "InputTerm indexes the $j-th input but only got $(length(xs))"
x = xs[j]
isnothing(output) || append!(output, x)
isnothing(type_ids) || append!(type_ids, Iterators.repeated(term.type_id, length(x)))
return j + 1
end
function process_term!(term::IndexInputTerm, output, type_ids, i, j, terms, xs)
idx = term.idx
    @assert idx <= length(xs) "IndexInputTerm indexes the $idx-th input but only got $(length(xs))"
x = xs[idx]
isnothing(output) || append!(output, x)
isnothing(type_ids) || append!(type_ids, Iterators.repeated(term.type_id, length(x)))
return idx == j ? j + 1 : j
end
function process_term!(term::ConstTerm, output, type_ids, i, j, terms, xs)
isnothing(output) || push!(output, term.value)
isnothing(type_ids) || push!(type_ids, term.type_id)
return j
end
function process_term!(term::RepeatedTerm, output, type_ids, i, j, terms, xs)
r_terms = term.terms
dynamic_type_id = term.dynamic_type_id
n = count(Base.Fix2(isa, InputTerm), terms[i+1:end])
J = length(xs) - n
type_id_offset = 0
while j <= J
if !isnothing(type_ids)
type_id_start = length(type_ids) + 1
end
_j = j
for (t_i, term_i) in enumerate(r_terms)
j = process_term!(term_i, output, type_ids, t_i, j, r_terms, xs)
end
_j == j && error("RepeatedTerm doesn't seem to terminate")
if !isnothing(type_ids)
type_id_end = length(type_ids)
dynamic_type_id != 0 && (type_ids[type_id_start:type_id_end] .+= type_id_offset)
type_id_offset += dynamic_type_id
end
end
return j
end
function process_template!(
st::SequenceTemplate{T}, output::Union{Vector{T}, Nothing}, type_ids::Union{Vector{Int}, Nothing}, xs
) where T
terms = st.terms
len = length(xs)
n_input = count(Base.Fix2(isa, InputTerm), terms)
    @assert len >= n_input "SequenceTemplate requires at least $n_input inputs but only got $len"
j = 1
for (i, term) in enumerate(terms)
j = process_term!(term, output, type_ids, i, j, terms, xs)
end
    @assert j > len "SequenceTemplate only takes $(j-1) inputs but got $len"
return output, type_ids
end
alloc_outputs(st::SequenceTemplate, ::Val{0}) = (Vector{eltype(st)}(), Vector{Int}())
alloc_outputs(st::SequenceTemplate, ::Val{1}) = (Vector{eltype(st)}(), nothing)
alloc_outputs(st::SequenceTemplate, ::Val{2}) = (nothing, Vector{Int}())
alloc_outputs(st::SequenceTemplate, ::Val{-1}) = (nothing, nothing)
apply_template(st::SequenceTemplate) = Base.Fix1(apply_template, st)
apply_template(st::SequenceTemplate, val::Val) = xs -> apply_template(st, val, xs)
apply_template(st::SequenceTemplate, xs) = apply_template(st, Val(0), xs)
apply_template(st::SequenceTemplate, val::Val, xs) = apply_template!(st, alloc_outputs(st, val), xs)
function apply_template!(st::SequenceTemplate, buffers::Tuple{A, B}, xs) where {A, B}
output, type_ids = process_template!(st, buffers[1], buffers[2], xs)
if !(isnothing(output) || isnothing(type_ids))
return output, type_ids
elseif !isnothing(output)
return output
elseif !isnothing(type_ids)
return type_ids
else
return nothing
end
end
(st::SequenceTemplate)(val::Val) = Base.Fix1(st, val)
(st::SequenceTemplate)(x::AbstractArray) = st(Val(0), x)
(st::SequenceTemplate{T})(xs::AbstractVector{T}...) where T = st(xs)
(st::SequenceTemplate{T})(xs::Tuple{Vararg{AbstractVector{T}}}) where T = st(Val(0), xs)
## static single sample
(st::SequenceTemplate{T})(val::Val, x::AbstractVector{T}, xs::AbstractVector{T}...) where T = apply_template(st, val, (x, xs...))
(st::SequenceTemplate{T})(val::Val, xs::Tuple{Vararg{AbstractVector{T}}}) where T = apply_template(st, val, xs)
(st::SequenceTemplate{T})(val::Val, xs::AbstractVector{<:AbstractVector{T}}) where T = apply_template(st, val, xs)
## static multiple sample
function (st::SequenceTemplate{T})(val::Val, xs::AbstractArray{<:AbstractVector{<:AbstractVector{T}}}) where T
if val == Val(0)
outputs = map(apply_template(st, Val(1)), xs)
type_ids = map(apply_template(st, Val(2)), xs)
return outputs, type_ids
elseif val == Val(-1)
foreach(apply_template(st, Val(-1)), xs)
return nothing
else
return map(apply_template(st, val), xs)
end
end
## deep nested or dynamic
@inline function _st_call(st::SequenceTemplate, val::Val, xs::AbstractArray)
return @elementmap xs st(val)(xs)
end
@inline function _st_nested(st::SequenceTemplate, val::Val, xs::AbstractArray)
if val == Val(0)
outputs = _st_call(st, Val(1), xs)
type_ids = _st_call(st, Val(2), xs)
return outputs, type_ids
elseif val == Val(-1)
foreach(st(Val(-1)), xs)
return nothing
else
return _st_call(st, val, xs)
end
end
function (st::SequenceTemplate{T})(val::Val, xs::AbstractArray) where T
if isnestedconcretetype(typeof(xs))
return _st_nested(st, val, xs)
end
aoa, naov = allany(Base.Fix2(isa, AbstractArray), xs)
aov = !naov
if aoa
if all(Base.Fix1(all, Base.Fix2(isa, T)), xs) # dynamic single sample
# xs is an array of sequence
return apply_template(st, val, xs)
elseif all(Base.Fix1(all, Base.Fix2(isa, AbstractArray)), xs) # dynamic multiple sample
# xs is an array of array of array
# return map(st(val), xs)
if val == Val(0)
outputs = map(st(Val(1)), xs)
type_ids = map(st(Val(2)), xs)
return outputs, type_ids
elseif val == Val(-1)
foreach(st(Val(-1)), xs)
return nothing
else
return map(st(val), xs)
end
# return _st_nested(st, val, xs)
else
throw(MethodError(st, xs))
end
elseif aov # dynamic single sample
# xs is a sequence
!all(Base.Fix2(isa, T), xs) && throw(MethodError(st, xs)) # assert eltype of sequence == T
return apply_template(st, val, (xs,))
else
throw(MethodError(st, xs))
end
end
_show(io, t::InputTerm) = print(io, "Input:<type=$(t.type_id)>")
_show(io, t::IndexInputTerm) = print(io, "Input[$(t.idx)]:<type=$(t.type_id)>")
_show(io, t::ConstTerm) = print(io, "$(t.value):<type=$(t.type_id)>")
function _show(io, t::RepeatedTerm)
print(io, '(')
_show(io, first(t.terms))
for term in Base.tail(t.terms)
print(io, ' ')
_show(io, term)
end
if iszero(t.dynamic_type_id)
print(io, ")...")
else
print(io, ")<type+=$(t.dynamic_type_id)>...")
end
end
Base.show(io::IO, ::MIME"text/plain", st::SequenceTemplate) = show(io, st)
function Base.show(io::IO, st::SequenceTemplate{T}) where T
print(io, "SequenceTemplate{", T, "}(")
_show(io, first(st.terms))
for term in Base.tail(st.terms)
print(io, ' ')
_show(io, term)
end
print(io, ')')
end
| TextEncodeBase | https://github.com/chengchingwen/TextEncodeBase.jl.git |
|
[
"MIT"
] | 0.8.3 | 66f827fa54c38cb7a7b174d3a580075b10793f5a | code | 8109 | using StaticArrays
abstract type AbstractVocabulary{T} end
Base.eltype(::AbstractVocabulary{T}) where T = T
struct Vocab{T, A<:AbstractVector{T}} <: AbstractVocabulary{T}
list::A
unk::T
unki::Int
end
"""
Vocab(data::Vector{<:AbstractString}, unk::AbstractString="[UNK]")
Constructor for `Vocab`. `data` is the list of vocabulary words and can be non-unique.
The actual list is the unique version of `data` (i.e. `vocab.list = unique(data)`).
`unk` is the indicator word for all unknown words. `unk` may or may not be in `data`,
depending on the use case.
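# Example
```julia
julia> Vocab(["a", "b", "c", "a", "b", "c"])
Vocab{String, SizedArray}(size = 3, unk = [UNK], unki = 0)
julia> Vocab(["a", "b", "xxx"], "xxx")
Vocab{String, SizedArray}(size = 3, unk = xxx, unki = 3)
```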
"""
Vocab(data::AbstractVector, unk::AbstractString="[UNK]") = Vocab{String}(data, unk)
"""
Vocab{T}(data::AbstractVector, unk) where T
Construct a `Vocab` with element type `T`. `unk` must be specified.
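# Example
```julia
julia> Vocab{Int}(11:20, 0)
Vocab{Int64, SizedArray}(size = 10, unk = 0, unki = 0)
```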
"""
function Vocab{T}(data::AbstractVector, unk) where T
udata = Vector{T}(undef, length(data))
unk = T(unk)
unique!(map!(T, udata, data))
list = SizedVector{length(udata)}(OverwritableLookupVector(udata))
i = findfirst(==(unk), list)
unki = isnothing(i) ? 0 : i
return Vocab(list, unk, unki)
end
Base.length(v::Vocab) = length(v.list)
function Base.show(io::IO, v::Vocab)
print(io, "Vocab{", eltype(v), ", ", nameof(typeof(v.list)), '}')
print(io, "(size = ", length(v))
print(io, ", unk = ", v.unk)
print(io, ", unki = ", v.unki, ')')
end
lookup_index(v::Vocab, word) = lookup_index(v.list, v.unki, word)
lookup_index(list::SizedVector, unki, word) = lookup_index(list.data, unki, word)
lookup_index(list, unki, word) = (i = findfirst(==(word), list); isnothing(i) ? unki : i)
lookup_word(v::Vocab, index) = lookup_word(v.list, v.unk, index)
lookup_word(list::SizedVector, unk, index) = lookup_word(list.data, unk, index)
lookup_word(list, unk, index) = 0 < index <= length(list) ? @inbounds(list[index]) : unk
lookup(v::Vocab) = Base.Fix1(lookup, v)
lookup(::Type{T}, v::Vocab) where T = lookup $ T $ v
lookup(::Type{I}, v::Vocab{T}, word::T) where {T, I<:Integer} = I(lookup_index(v, word))
lookup(::Type{I}, v::Vocab{<:Integer}, word::Integer) where I<:Integer = I(lookup_index(v, word))
lookup(::Type{I}, v::Vocab{<:AbstractString}, word::AbstractString) where I<:Integer = I(lookup_index(v, word))
lookup(::Type{T}, v::Vocab{T}, index::Integer) where T = lookup_word(v, index)
lookup(::Type{<:Integer}, v::Vocab{T}, i::Integer) where T = throw(DomainError(i, "Cannot lookup the value $i in the vocabulary: value should have the same type as Vocab's element type ($(eltype(v)))"))
lookup(v::Vocab{T}, word::T) where T = lookup(Int, v, word)
lookup(v::Vocab{<:AbstractString}, word::AbstractString) = lookup(Int, v, word)
lookup(v::Vocab{<:Integer}, index::Integer) = lookup_word(v, index)
lookup(v::Vocab, index::Integer) = lookup(eltype(v), v, index)
lookup(v::Vocab, i, j, k...) = (lookup(v, i), lookup(v, j), map(lookup(v), k)...)
lookup(v::Vocab, is::Union{AbstractArray, Tuple, NamedTuple}) = map(lookup(v), is)
lookup(::Type{T}, v::Vocab, i, j, k...) where T = lookup(T, v, (i, j, k...))
lookup(::Type{T}, v::Vocab, is::Union{AbstractArray, Tuple, NamedTuple}) where T = map(lookup(T, v), is)
lookup(::Type{OneHot}, v::Vocab, i) = lookup_onehot(v, i)
lookup(T::Type{OneHot}, v::Vocab, is::AbstractArray) = OneHotArray{length(v)}(lookup_onehot(v, is))
lookup(::Type{OneHot}, v::Vocab, is::Union{Tuple, NamedTuple}) = map(lookup(OneHot, v), is)
lookup_onehot(v::Vocab, i) = OneHot(length(v))(lookup(UInt32, v, i))
lookup_onehot(v::Vocab, is::AbstractArray) = map(lookup_onehot $ v, is)
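# Mixed scalar/array one-hot lookup: each argument contributes one column (a scalar) or a
# block of columns (an array), concatenated into a single OneHotArray,
# e.g. with `vocab = Vocab(["a", "b", "c"])`:
#   lookup(OneHot, vocab, ["a"], "b", ["c", "d"], "z") == OneHotArray(3, [1, 2, 3, 0, 0])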
function lookup(T::Type{OneHot}, v::Vocab, i, j, k...)
c = 0
si = lookup(T, v, i)
c += si isa OneHot ? 1 : length(parent(si))
sj = lookup(T, v, j)
c += sj isa OneHot ? 1 : length(parent(sj))
sk = map(lookup(T, v), k)
c += sum(k->k isa OneHot ? 1 : length(parent(k)), sk; init=0)
arr = Vector{OneHot(length(v))}(undef, c); empty!(arr)
si isa OneHot ? push!(arr, si) : append!(arr, parent(si))
sj isa OneHot ? push!(arr, sj) : append!(arr, parent(sj))
for k in sk
k isa OneHot ? push!(arr, k) : append!(arr, parent(k))
end
return OneHotArray(arr)
end
lookup(v::Vocab, i::OneHot) = lookup(v, Int(i))
lookup(v::Vocab, i::OneHotArray) = lookup(v, reinterpret(UInt32, i))
lookup(::Type{T}, v::Vocab{T}, i::OneHot) where T = lookup(v, i)
lookup(::Type{T}, v::Vocab{T}, i::OneHotArray) where T = lookup(v, i)
"""
lookup(v::Vocab, x)
Look up `x` in `v`. The behavior of `lookup` depends on the type of `x`. If `x` is an integer,
return the `x`-th word in the vocabulary list (i.e. `v.list[x]`), or the unknown word (`v.unk`)
if `x` is out of bounds. If `x` is a string, return the index of `x` in the vocabulary
list (i.e. `findfirst(==(x), v.list)`), or the unknown index if `x` is not found in the list.
If the unknown word `v.unk` is in the list, the unknown index is its index, otherwise 0.
This function is bidirectional except for `Vocab{<:Integer}`. For an integer vocabulary, this
function only gets the `x`-th word (`v.list[x]`). Use `lookup(Int, v, x)` for explicit index lookup.
# Example
```julia
julia> vocab = Vocab(["a", "b", "c", "a", "b", "c"])
Vocab{String, SizedArray}(size = 3, unk = [UNK], unki = 0)
julia> vocab_unk = Vocab(["a", "b", "xxx"], "xxx")
Vocab{String, SizedArray}(size = 3, unk = xxx, unki = 3)
julia> lookup(vocab, "b")
2
julia> lookup(vocab, "d")
0
julia> lookup(vocab_unk, "d")
3
julia> lookup(vocab, 1)
"a"
julia> lookup(vocab, 10000)
"[UNK]"
julia> lookup(vocab_unk, 10000)
"xxx"
```
"""
function lookup end
@eval $((@macroexpand @doc """
lookup(Int, v::Vocab, x)
The explicit version of `lookup(v, x)`. Look up the index of `x` in the vocabulary
list. `x` should have the same type as the Vocab's element type.
# Example
```julia
julia> vocab_unk = Vocab(["a", "b", "xxx"], "xxx")
Vocab{String, SizedArray}(size = 3, unk = xxx, unki = 3)
julia> lookup(Int, vocab_unk, "b")
2
```
"""
function lookup(Int, v::Vocab, x) end
).args[2])
@eval $((@macroexpand @doc """
lookup(::Type{T}, v::Vocab{T}, i::Integer) where T
The explicit version of `lookup(v, i)`. Look up the word at index `i` in the vocabulary
list. `T` should be the same as the Vocab's element type. This method doesn't
work on an integer vocab; use `lookup(v, i)` directly.
# Example
```julia
julia> vocab_unk = Vocab(["a", "b", "xxx"], "xxx")
Vocab{String, SizedArray}(size = 3, unk = xxx, unki = 3)
julia> lookup(String, vocab_unk, 1)
"a"
```
"""
lookup(::Type{T}, v::Vocab{T}, i::Integer) where T
).args[2])
@eval $((@macroexpand @doc """
lookup(v::Vocab, is::AbstractArray)
Recursively look up values from `is`.
# Example
```julia
julia> lookup(vocab, ["b", "c", "a", "A", "[UNK]"])
5-element Vector{Int64}:
2
3
1
0
0
julia> lookup(vocab, [1, "a", 0, "A", "[UNK]"])
5-element Vector{Any}:
"a"
1
"[UNK]"
0
0
```
"""
function lookup(v::Vocab, is::AbstractArray) end
).args[2])
@eval $((@macroexpand @doc """
lookup(OneHot, v::Vocab, i)
Look up `i` and convert it into a one-hot representation.
# Example
```julia
julia> lookup(OneHot, vocab, "a")
3-element OneHot{3}:
1
0
0
julia> lookup(OneHot, vocab, ["a" "b"; "c" "d"])
3x2x2 OneHotArray{3, 3, Matrix{OneHot{0x00000003}}}:
[:, :, 1] =
1 0
0 0
0 1
[:, :, 2] =
0 0
1 0
0 0
julia> lookup(OneHot, vocab, 3)
ERROR: DomainError with c:
cannot convert `lookup(::Vocab, 3)` = "c" into one-hot representation.
Stacktrace:
[...]
```
"""
function lookup(::Type{OneHot}, v::Vocab, i) end
).args[2])
@eval $((@macroexpand @doc """
lookup(v::Vocab, i::OneHotArray)
Convert a one-hot representation back into words.
# Example
```julia
julia> lookup(OneHot, vocab, ["a" "b"; "c" "d"])
3x2x2 OneHotArray{3, 3, Matrix{OneHot{0x00000003}}}:
[:, :, 1] =
1 0
0 0
0 1
[:, :, 2] =
0 0
1 0
0 0
julia> lookup(vocab, ans)
2×2 Matrix{String}:
"a" "b"
"c" "[UNK]"
```
"""
function lookup(v::Vocab, i::OneHotArray) end
).args[2])
| TextEncodeBase | https://github.com/chengchingwen/TextEncodeBase.jl.git |
|
[
"MIT"
] | 0.8.3 | 66f827fa54c38cb7a7b174d3a580075b10793f5a | code | 48071 | using TextEncodeBase
using RustRegex
using Test
# quick-and-dirty macro for turning @inferred into a test case
macro test_inferred(ex)
esc(quote
@test begin
@inferred $ex
true
end
end)
end
using TextEncodeBase: AbstractTokenizer, AbstractTokenization,
BaseTokenization, NestedTokenizer, FlatTokenizer,
WordTokenization, EachSplitTokenization,
EachMatchTokenization, MatchSplitsTokenization,
IndexedTokenization, MatchTokenization,
UnicodeNormalizer, CodeNormalizer, CodeUnMap,
SentenceFuncNormalizer, WordFuncNormalizer,
SentenceReplaceNormalizer, WordReplaceNormalizer,
TokenStages, Document, Sentence, Word, Token, Batch
using TextEncodeBase: getvalue, getmeta, updatevalue,
with_head_tail, trunc_and_pad, trunc_or_pad, nested2batch, batch2nested, nestedcall, join_text
using TextEncodeBase: SequenceTemplate, InputTerm, IndexInputTerm, ConstTerm, RepeatedTerm
using WordTokenizers
const ATR = AbstractTokenizer
const AT = AbstractTokenization
const BT = BaseTokenization
struct CharTk <: BT end
TextEncodeBase.splitting(::CharTk, x::Word) = split(x.x, "")
TextEncodeBase.splittability(::CharTk, x::Word) = TextEncodeBase.Splittable()
function gpt2_tokenizer(text)
pattern = r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+"
return map(x->x.match, eachmatch(pattern, text))
end
@testset "TextEncodeBase.jl" begin
@testset "Tokenize" begin
document = Document("This is the first sentence. And the second one with some number 12345.")
sentence = Sentence("A single sentence with 31 char.")
word = Word("word")
@testset "base tokenizer" begin
tkr = FlatTokenizer()
@test tkr(document) == map(Token, mapfoldl(nltk_word_tokenize, append!, split_sentences(document.x)))
@test tkr(sentence) == map(Token, nltk_word_tokenize(sentence.x))
@test tkr(word) == [Token(word.x)]
end
@testset "edge case" begin
tkr = FlatTokenizer()
@test tkr(Document("")) == []
@test tkr(Sentence("")) == []
@test tkr(Word("")) == []
@test tkr(Token("")) == []
end
@testset "word tokenizer" begin
tkr = FlatTokenizer(WordTokenization(tokenize=poormans_tokenize))
tkr2 = FlatTokenizer()
@test tkr(document) == map(Token, mapfoldl(poormans_tokenize, append!, split_sentences(document.x)))
@test tkr(sentence) == map(Token, poormans_tokenize(sentence.x))
@test tkr(word) == [Token(word.x)]
@test tkr(document) != tkr2(document)
@test tkr(sentence) != tkr2(sentence)
end
@testset "split tokenizer" begin
tkr_s = FlatTokenizer(EachSplitTokenization([' ', ',', '.']))
tkr_m = FlatTokenizer(EachMatchTokenization(r"[^\. ]+|\."))
@test map(getvalue, tkr_s(document)) == filter(!=("."), mapfoldl(nltk_word_tokenize, append!, split_sentences(document.x)))
@test map(getvalue, tkr_s(sentence)) == filter(!=("."), nltk_word_tokenize(sentence.x))
@test tkr_s(word) == [Token(word.x)]
@test map(getvalue, tkr_m(document)) == mapfoldl(nltk_word_tokenize, append!, split_sentences(document.x))
@test map(getvalue, tkr_m(sentence)) == nltk_word_tokenize(sentence.x)
@test tkr_m(word) == [Token(word.x)]
@test map(getvalue, tkr_s(Sentence("a b"))) == ["a", "b"]
@test map(getvalue, tkr_m(Sentence("a b"))) == ["a", "b"]
end
@testset "matchsplits tokenizer" begin
tkr_a = FlatTokenizer(MatchSplitsTokenization(" "))
tkr_b = FlatTokenizer(MatchSplitsTokenization(","))
@test join(map(getvalue, tkr_a(sentence))) == sentence.x
@test map(getvalue, tkr_b(Sentence("a, b, c,d"))) == ["a", ",", " b", ",", " c", ",", "d"]
x = Sentence("a,, b, c,d")
tkr_c = FlatTokenizer(MatchSplitsTokenization([rure",,", rure", "]))
@test join(map(getvalue, tkr_c(x))) == x.x
@test map(getvalue, tkr_c(x)) == ["a", ",,", " b", ", ", "c,d"]
end
@testset "index tokenizer" begin
tkr = FlatTokenizer(IndexedTokenization())
@test tkr(document) == begin
sentences = split_sentences(document.x)
words = map(x->nltk_word_tokenize(x), sentences)
tokens = Token[]
for (i, s) in enumerate(words)
for (j, w) in enumerate(s)
push!(tokens, Token(w, (sentence_id = i, word_id = j, token_id = j)))
end
end
tokens
end
@test tkr(sentence) == begin
words = nltk_word_tokenize(sentence.x)
tokens = Token[]
for (i, w) in enumerate(words)
push!(tokens, Token(w, (word_id = i, token_id = i)))
end
tokens
end
@test tkr(word) == [Token(word.x, (word_id = 1, token_id = 1))]
end
@testset "match tokenizer" begin
for pats in (
[r"\d", r"en"],
[rure"\d", rure"en"],
[r"\d", rure"en"],
[rure"\d", r"en"],
)
tkr = FlatTokenizer(MatchTokenization(pats))
@test map(getvalue, tkr(document)) == [
"This", "is", "the", "first", "s",
"en", "t", "en", "ce", ".", "And",
"the", "second", "one", "with", "some",
"number", "1", "2", "3", "4", "5", ".",
]
@test map(getvalue, tkr(sentence)) == [
"A", "single", "s", "en", "t", "en",
"ce", "with", "3", "1", "char", ".",
]
@test map(getvalue, tkr(word)) == [word.x]
@test map(getvalue, tkr(Word("123"))) == ["1", "2", "3"]
end
end
@testset "unicode normalizer" begin
tkr = FlatTokenizer(UnicodeNormalizer(; casefold = true))
@test tkr(updatevalue(uppercase, document)) ==
map(Token, mapfoldl(nltk_word_tokenize, append!, lowercase.(split_sentences(document.x))))
@test tkr(updatevalue(uppercase, sentence)) == map(Token, nltk_word_tokenize(lowercase(sentence.x)))
@test tkr(updatevalue(uppercase, word)) == [Token(lowercase(word.x))]
end
@testset "replace normalizer" begin
tkr = FlatTokenizer(TextEncodeBase.ReplaceNormalizer(r"\d+"=>"NUMBER"))
@test map(getvalue, tkr(document)) ==
map(x->replace(x, r"\d+"=>"NUMBER"),
mapfoldl(nltk_word_tokenize, append!, split_sentences(document.x)))
@test map(getvalue, tkr(sentence)) ==
map(x->replace(x, r"\d+"=>"NUMBER"), nltk_word_tokenize(sentence.x))
@test tkr(word) == [Token(word.x)]
tkr1 = FlatTokenizer(SentenceReplaceNormalizer(r"(.+)"=>s"--\1"))
@test map(getvalue, tkr1(document)) ==
mapfoldl(nltk_word_tokenize ∘ Base.Fix1(*, "--"), append!, split_sentences(document.x))
@test map(getvalue, tkr1(sentence)) == nltk_word_tokenize("--" * sentence.x)
@test tkr1(word) == [Token("--" * word.x)]
tkr2 = FlatTokenizer(WordReplaceNormalizer(r"(.+)"=>s"--\1"))
@test map(getvalue, tkr2(document)) == map(
Base.Fix1(*, "--"), mapfoldl(nltk_word_tokenize, append!, split_sentences(document.x)))
@test map(getvalue, tkr2(sentence)) == map(Base.Fix1(*, "--"), nltk_word_tokenize(sentence.x))
@test tkr2(word) == [Token("--" * word.x)]
end
@testset "func normalizer" begin
tkr1 = FlatTokenizer(SentenceFuncNormalizer(Base.Fix2(replace, r"(.+)"=>s"--\1")))
@test map(getvalue, tkr1(document)) ==
mapfoldl(nltk_word_tokenize ∘ Base.Fix1(*, "--"), append!, split_sentences(document.x))
@test map(getvalue, tkr1(sentence)) == nltk_word_tokenize("--" * sentence.x)
@test tkr1(word) == [Token("--" * word.x)]
tkr2 = FlatTokenizer(WordFuncNormalizer(Base.Fix2(replace, r"(.+)"=>s"--\1")))
@test map(getvalue, tkr2(document)) == map(
Base.Fix1(*, "--"), mapfoldl(nltk_word_tokenize, append!, split_sentences(document.x)))
@test map(getvalue, tkr2(sentence)) == map(Base.Fix1(*, "--"), nltk_word_tokenize(sentence.x))
@test tkr2(word) == [Token("--" * word.x)]
end
@testset "code normalizer" begin
tkr = FlatTokenizer(CodeNormalizer('a':'z'=>'A':'Z', 'A':'Z'=>'a':'z'))
@test tkr(updatevalue(uppercase, document)) ==
map(Token, mapfoldl(nltk_word_tokenize, append!, lowercase.(split_sentences(document.x))))
@test tkr(updatevalue(uppercase, sentence)) == map(Token, nltk_word_tokenize(lowercase(sentence.x)))
@test tkr(updatevalue(uppercase, word)) == [Token(lowercase(word.x))]
@test tkr(updatevalue(lowercase, sentence)) == map(Token, nltk_word_tokenize(uppercase(sentence.x)))
@test tkr(updatevalue(lowercase, word)) == [Token(uppercase(word.x))]
tkr2 = FlatTokenizer(CodeNormalizer(
WordTokenization(tokenize=gpt2_tokenizer),
[(0:32, 256:288), (127:160, 289:322), 173=>323]
))
@test map(getvalue, tkr2(Document("This is a 😺"))) == ["This", "Ġis", "Ġa", "ĠðŁĺº"]
unmap = CodeUnMap(tkr2.tokenization.codemap)
@test map(unmap, ["This", "Ġis", "Ġa", "ĠðŁĺº"]) == ["This", " is", " a", " 😺"]
end
@testset "match unicode normalized tokenizer" begin
for pats in (
["This", "A", "en", r"\d"],
["This", "A", "en", rure"\d"],
)
tkr = FlatTokenizer(MatchTokenization(UnicodeNormalizer(; casefold = true), pats))
@test map(getvalue, tkr(document)) == [
"This", "is", "the", "first", "s",
"en", "t", "en", "ce", ".", "A", "nd",
"the", "second", "one", "with", "some",
"number", "1", "2", "3", "4", "5", ".",
]
@test map(getvalue, tkr(sentence)) == [
"A", "single", "s", "en", "t", "en",
"ce", "with", "3", "1", "char", ".",
]
@test map(getvalue, tkr(word)) == [word.x]
@test map(getvalue, tkr(Word("123"))) == ["1", "2", "3"]
end
end
@testset "match code normalized tokenizer" begin
for pats in (
["This", "A", "en", r"\d"],
["This", "A", "en", rure"\d"],
)
tkr = FlatTokenizer(MatchTokenization(CodeNormalizer('a':'z'=>'A':'Z', 'A':'Z'=>'a':'z'), pats))
@test map(getvalue, tkr(document)) == [
"This", "IS", "THE", "FIRST", "S",
"en", "T", "en", "CE", ".", "A", "ND",
"THE", "SECOND", "ONE", "WITH", "SOME",
"NUMBER", "1", "2", "3", "4", "5", ".",
]
@test map(getvalue, tkr(sentence)) == [
"A", "SINGLE", "S", "en", "T", "en",
"CE", "WITH", "3", "1", "CHAR", ".",
]
@test map(getvalue, tkr(word)) == ["WORD"]
@test map(getvalue, tkr(Word("123"))) == ["1", "2", "3"]
end
end
@testset "indexed match tokenizer" begin
for pats in (
[r"\d", r"en"],
[rure"\d", r"en"],
[rure"\d", rure"en"],
)
tkr = FlatTokenizer(IndexedTokenization(MatchTokenization(pats)))
@test map(getvalue, tkr(document)) == [
"This", "is", "the", "first", "s",
"en", "t", "en", "ce", ".", "And",
"the", "second", "one", "with", "some",
"number", "1", "2", "3", "4", "5", ".",
]
@test map(getmeta, tkr(document)) == begin
m = [false, false, false, false, false, true, false, true, false, false, false,
false, false, false, false, false, false, true, true, true, true, true, false]
s = Iterators.flatten((Iterators.repeated(1, 10), Iterators.repeated(2, 13)))
w = Iterators.flatten((1:10, 1:13))
map(NamedTuple{(:sentence_id, :ismatch, :word_id, :token_id)}, zip(s, m, w, w))
end
@test map(getvalue, tkr(sentence)) == [
"A", "single", "s", "en", "t", "en",
"ce", "with", "3", "1", "char", ".",
]
sentence_match = [false, false, false, true, false, true, false, false, true, true, false, false]
@test map(getmeta, tkr(sentence)) == map(NamedTuple{(:ismatch, :word_id, :token_id)}, zip(sentence_match, 1:12, 1:12))
@test map(getvalue, tkr(word)) == [word.x]
@test map(getmeta, tkr(word)) == [(ismatch = false, word_id = 1, token_id = 1)]
@test map(getvalue, tkr(Word("123"))) == ["1", "2", "3"]
@test map(getmeta, tkr(Word("123"))) == map(NamedTuple{(:ismatch, :word_id, :token_id)}, zip([true, true, true], 1:3, 1:3))
end
end
@testset "nested output" begin
tkr = NestedTokenizer(IndexedTokenization())
@test tkr(document) == begin
sentences = split_sentences(document.x)
words = map(x->nltk_word_tokenize(x), sentences)
tokens = []
for (i, s) in enumerate(words)
push!(tokens, map(enumerate(s)) do (j, w)
Token(w, (sentence_id = i, word_id = j, token_id = j))
end)
end
tokens
end
@test tkr(sentence) == begin
words = nltk_word_tokenize(sentence.x)
tokens = [map(enumerate(words)) do (i, w)
Token(w, (word_id = i, token_id = i))
end]
tokens
end
@test tkr(word) == [Token(word.x, (word_id = 1, token_id = 1))]
end
@testset "indexed char" begin
tkr = FlatTokenizer(IndexedTokenization(CharTk()))
@test tkr(document) == begin
sentences = split_sentences(document.x)
words = map(x->nltk_word_tokenize(x), sentences)
tokens = Token[]
for (i, s) in enumerate(words)
k = 1
for (j, w) in enumerate(s)
for c in split(w, "")
push!(tokens, Token(c, (sentence_id = i, word_id = j, token_id = k)))
k += 1
end
end
end
tokens
end
@test tkr(sentence) == begin
words = nltk_word_tokenize(sentence.x)
tokens = Token[]
for (i, w) in enumerate(words)
for c in split(w, "")
push!(tokens, Token(c, (word_id = i, token_id = length(tokens)+1)))
end
end
tokens
end
@test tkr(word) == begin
chars = split(word.x, "")
tokens = Token[]
for (i, c) in enumerate(chars)
push!(tokens, Token(c, (word_id = 1, token_id = i)))
end
tokens
end
end
@testset "nested indexed match char" begin
for pats in (
[r"\d", r"en"],
[rure"\d", r"en"],
[rure"\d", rure"en"],
)
tkr = NestedTokenizer(IndexedTokenization(MatchTokenization(CharTk(), pats)))
s(x) = split(x, "")
r(x, n) = repeat(x:x, n)
@test nestedcall(getvalue, tkr(document)) == [
[
s("This"); s("is"); s("the"); s("first");
"s"; "en"; "t"; "en"; s("ce"); ".";
],
[
s("And"); s("the"); s("second"); s("one"); s("with");
s("some"); s("number"); "1"; "2"; "3"; "4"; "5"; ".";
]
]
@test nestedcall(getmeta, tkr(document)) == begin
ismatch = [
[r(false, 15); true; false; true; r(false, 3);],
[r(false, 29); r(true, 5); false;],
]
sentence_id = [r(1, 21), r(2, 35)]
word_id = [[
r(1, 4); r(2, 2); r(3, 3); r(4, 5);
5; 6; 7; 8; r(9, 2); 10;
], [
r(1, 3); r(2, 3); r(3, 6); r(4, 3); r(5, 4);
r(6, 4); r(7, 6); 8; 9; 10; 11; 12; 13;
]]
token_id = [[1:21;], [1:35;]]
map((s,m,w,t)->map(NamedTuple{(:sentence_id, :ismatch, :word_id, :token_id)}, zip(s,m,w,t)), sentence_id, ismatch, word_id, token_id)
end
@test nestedcall(getvalue, tkr(sentence)) == [
[
"A"; s("single"); "s"; "en"; "t"; "en";
s("ce"); s("with"); "3"; "1"; s("char"); ".";
]
]
@test nestedcall(getmeta, tkr(sentence)) == begin
ismatch = [r(false, 8); true; false; true; r(false, 6); r(true, 2); r(false, 5);]
word_id = [
1; r(2, 6); 3; 4; 5; 6;
r(7, 2); r(8, 4); 9; 10; r(11, 4); 12;
]
token_id = [1:24;]
[map(NamedTuple{(:ismatch, :word_id, :token_id)}, zip(ismatch, word_id, token_id))]
end
@test nestedcall(getvalue, tkr(word)) == ["w", "o", "r", "d"]
@test nestedcall(getmeta, tkr(word)) == map(NamedTuple{(:ismatch, :word_id, :token_id)}, zip(r(false, 4), r(1, 4), 1:4))
end
end
@testset "@stage" begin
@test_throws Exception @macroexpand(TextEncodeBase.@stage SomeStage{A})
@test_throws Exception @macroexpand(TextEncodeBase.@stage SomeStage{A, B, C})
@test_throws Exception @macroexpand(TextEncodeBase.@stage SomeStage{A, B} <: C D)
@test_throws Exception @macroexpand(TextEncodeBase.@stage 3)
@test_throws Exception @macroexpand(TextEncodeBase.@stage SomeStage{A}())
@test_nowarn @macroexpand(TextEncodeBase.@stage SomeStage)
@test_nowarn @macroexpand(TextEncodeBase.@stage SomeStage{A, B} <: TokenStages)
end
@testset "batch" begin
tkr = NestedTokenizer(IndexedTokenization())
document = document.x
sentence = sentence.x
another_sentence = "This is another sentence"
batch_sentence = [split_sentences(document); sentence; another_sentence]
batch_document = [document, sentence, another_sentence]
@test nestedcall(getvalue, tkr(Batch{Sentence}(batch_sentence))) == nltk_word_tokenize.(batch_sentence)
@test nestedcall(getmeta, tkr(Batch{Sentence}(batch_sentence))) ==
map(enumerate(nltk_word_tokenize.(batch_sentence))) do (i, v)
map(enumerate(v)) do (j, x)
(sentence_id = i, word_id = j, token_id = j)
end
end
@test nestedcall(getvalue, tkr(Batch{Document}(batch_document))) == map(batch_document) do doc
nltk_word_tokenize.(split_sentences(doc))
end
@test nestedcall(getmeta, tkr(Batch{Document}(batch_document))) ==
map(enumerate(split_sentences.(batch_document))) do (i, d)
map(enumerate(nltk_word_tokenize.(d))) do (j, s)
map(enumerate(s)) do (k, x)
(document_id = i, sentence_id = j, word_id = k, token_id = k)
end
end
end
end
@testset "show" begin
@test sprint(show, FlatTokenizer()) == "FlatTokenizer(default)"
@test sprint(show, FlatTokenizer(WordTokenization(tokenize=poormans_tokenize))) == "FlatTokenizer(WordTokenization(split_sentences = WordTokenizers.split_sentences, tokenize = WordTokenizers.poormans_tokenize))"
@test sprint(show, FlatTokenizer(IndexedTokenization())) == "FlatTokenizer(IndexedTokenization(default))"
@test sprint(show, FlatTokenizer(MatchTokenization([r"\d", r"en"]))) == "FlatTokenizer(MatchTokenization(default, 2 patterns))"
@test sprint(show, FlatTokenizer(UnicodeNormalizer(; casefold = true))) == "FlatTokenizer(UnicodeNormalizer(default, compose = true, casefold = true))"
@test sprint(show, FlatTokenizer(IndexedTokenization(MatchTokenization([r"\d", r"en"])))) == "FlatTokenizer(IndexedTokenization(MatchTokenization(default, 2 patterns)))"
@test sprint(show, NestedTokenizer(IndexedTokenization())) == "NestedTokenizer(IndexedTokenization(default))"
@test sprint(show, FlatTokenizer(IndexedTokenization(CharTk()))) == "FlatTokenizer(IndexedTokenization(CharTk))"
@test sprint(show, NestedTokenizer(IndexedTokenization(MatchTokenization(CharTk(), [r"\d", r"en"])))) == "NestedTokenizer(IndexedTokenization(MatchTokenization(CharTk, 2 patterns)))"
end
@testset "replace" begin
@test replace(
x->x === poormans_tokenize ? nltk_word_tokenize : x,
FlatTokenizer(WordTokenization(tokenize=poormans_tokenize))
) == FlatTokenizer(WordTokenization(tokenize=nltk_word_tokenize))
@test replace(
x->x isa IndexedTokenization ? x.base : x,
IndexedTokenization(MatchTokenization([r"\d", r"en"]))
) == MatchTokenization([r"\d", r"en"])
@test replace(
x->x isa UnicodeNormalizer ? UnicodeNormalizer(x.base, :NFC) : x,
FlatTokenizer(UnicodeNormalizer(; casefold = true))
) == FlatTokenizer(UnicodeNormalizer(:NFC))
end
end
@testset "Vocabulary" begin
vocab = Vocab(["a", "b", "c", "a", "b", "c"])
vocab_unk = Vocab(["a", "b", "xxx"], "xxx")
vocab_char = Vocab{Char}('a':'z', ' ')
vocab_int = Vocab{Int}(11:20, 0)
@test length(vocab) == 3
@test vocab.list == ["a", "b", "c"]
@test vocab.unki == 0
@test length(vocab_unk) == 3
@test vocab_unk.list == ["a", "b", "xxx"]
@test vocab_unk.unki == 3
@test vocab_int.list == collect(11:20)
@test sprint(show, vocab_int) == "Vocab{$Int, SizedArray}(size = 10, unk = 0, unki = 0)"
@testset "lookup" begin
@test lookup(vocab, "a") == 1
@test lookup(vocab, "b") == 2
@test lookup(vocab, "c") == 3
@test lookup(vocab, "d") == 0
@test lookup(vocab_unk, "a") == 1
@test lookup(vocab_unk, "b") == 2
@test lookup(vocab_unk, "c") == 3
@test lookup(vocab_unk, "d") == 3
@test lookup(vocab, 1) == "a"
@test lookup(vocab, 2) == "b"
@test lookup(vocab, 3) == "c"
@test lookup(vocab, 0) == "[UNK]"
@test lookup(vocab, 1000000) == "[UNK]"
@test lookup(vocab_unk, 1) == "a"
@test lookup(vocab_unk, 2) == "b"
@test lookup(vocab_unk, 3) == "xxx"
@test lookup(vocab_unk, 100000) == "xxx"
@test lookup(vocab, 1,2,3,4) == ("a", "b", "c", "[UNK]")
@test lookup(vocab_unk, 1,2,3,4) == ("a", "b", "xxx", "xxx")
@test lookup(vocab, [1, "a", 0, "A", "[UNK]"]) == ["a", 1, "[UNK]", 0, 0]
@test lookup(vocab_unk, [1, "a", 0, "A", "[UNK]"]) == ["a", 1, "xxx", 3, 3]
@test lookup(vocab, [(1, "a"), (a=0, b="A"), "[UNK]"]) == [("a", 1), (a="[UNK]", b=0), 0]
@test lookup(vocab_char, ['a', (x='x',)], 26) == ([1, (x=24,)], 'z')
end
@testset "lookup int" begin
@test lookup(vocab_int, 1,2,3) == (11,12,13)
@test lookup(Int, vocab_int, 1,2,3) == (0,0,0)
@test lookup(Int, vocab_int, 11,12,13) == (1,2,3)
end
@testset "onehot" begin
@test lookup(OneHot, vocab, "a") == OneHot(3, 1)
@test lookup(OneHot, vocab, "A") == OneHot(3, 0)
@test lookup(OneHot, vocab, ("a", "A")) == (OneHot(3, 1), OneHot(3, 0))
@test lookup(OneHot, vocab, (x="a", y="A")) == (x=OneHot(3, 1), y=OneHot(3, 0))
@test lookup(OneHot, vocab, "a", "b", "c", "d") == OneHotArray(3, [1,2,3,0])
@test lookup(OneHot, vocab, ["a", "b", "c", "d"]) == OneHotArray(3, [1,2,3,0])
@test lookup(OneHot, vocab, ["a" "b"; "c" "d"]) == OneHotArray(3, [1 2; 3 0])
@test lookup(OneHot, vocab, ["a", "b"], ["c", "d"]) == OneHotArray(3, [1,2,3,0])
@test lookup(OneHot, vocab, ["a"], "b", ["c", "d"], "z") == OneHotArray(3, [1,2,3,0,0])
@test_throws DomainError lookup(OneHot, vocab, 1)
@test_throws DomainError lookup(OneHot, vocab, [1,2,3])
@test_throws DomainError lookup(OneHot, vocab, "1",2,3)
@test_throws DomainError lookup(OneHot, vocab, ["1",2,3])
@test lookup(vocab, OneHot(3, 1)) == "a"
@test lookup(vocab, OneHot(5, 1)) == "a"
@test lookup(vocab, OneHot(5, 4)) == "[UNK]"
@test lookup(vocab, OneHotArray(3, [1,2,3,0])) == ["a", "b", "c", "[UNK]"]
@test lookup(vocab, OneHotArray(3, [1 2 3; 3 0 1])) == ["a" "b" "c"; "c" "[UNK]" "a"]
end
@testset "overwrite" begin
vocab_overwrite = Vocab(["a", "b", "c"])
@test_throws AssertionError vocab_overwrite.list.data["a"] = "c"
@test_throws BoundsError vocab_overwrite.list.data[0] = "x"
vocab_overwrite.list.data["b"] = "x"
@test vocab_overwrite.list == ["a", "x", "c"]
@test lookup(vocab_overwrite, 2) == "x"
@test lookup(vocab_overwrite, "b") == 0
@test lookup(vocab_overwrite, "x") == 2
end
end
@testset "Utils" begin
@test_inferred nestedcall(x->x+1, [[[[3],[5, 6]]]])
@testset "with_head_tail" begin
x = collect(1:5)
@test with_head_tail(x, 0, 6) == collect(0:6)
@test with_head_tail(x, nothing, 6) == collect(1:6)
@test with_head_tail(x, 0, nothing) == collect(0:5)
@test with_head_tail(x, nothing, nothing) == x
@test with_head_tail(0, 6)(x) == collect(0:6)
@test with_head_tail(nothing, 6)(x) == collect(1:6)
@test with_head_tail(0, nothing)(x) == collect(0:5)
@test with_head_tail(nothing, nothing)(x) == x
@test with_head_tail(x; head=0, tail=6) == collect(0:6)
@test with_head_tail(x, tail=6) == collect(1:6)
@test with_head_tail(x, head=0) == collect(0:5)
@test with_head_tail(head=0, tail=6)(x) == collect(0:6)
@test with_head_tail(tail=6)(x) == collect(1:6)
@test with_head_tail(head=0)(x) == collect(0:5)
@test with_head_tail(AbstractVector[[x], 1:5, 2:3], -1, -2) == [[[-1;x;-2]], [-1; 1:5; -2], [-1; 2:3; -2]]
@test with_head_tail(Any[Any[x], 1:5, 2:3], -1, -2) == [[[-1;x;-2]], [-1; 1:5; -2], [-1; 2:3; -2]]
@test with_head_tail(Any[Any[Any[0,1,2]]], 5, 5) == [[[5,0,1,2,5]]]
end
@testset "trunc_or_pad" begin
@testset "trunc=tail pad=tail" begin
x = collect(1:9)
@test trunc_or_pad(x, 5, 0) == collect(1:5)
@test trunc_or_pad(1:3, 5, 0) == [1:3; 0; 0]
@test trunc_or_pad(x, nothing, 0) == collect(1:9)
@test trunc_or_pad(1:3, nothing, 0) == collect(1:3)
@test trunc_or_pad(5, 0)(x) == collect(1:5)
@test trunc_or_pad(5, 0)(1:3) == [1:3; 0; 0]
@test trunc_or_pad(nothing, 0)(x) == collect(1:9)
@test trunc_or_pad(nothing, 0)(1:3) == collect(1:3)
@test trunc_or_pad(AbstractVector[[x], 1:5, 2:3], 7, -1) ==
[[collect(1:7)], [1:5; -1; -1], [2:3; fill(-1, 5)]]
@test trunc_or_pad(AbstractVector[[x], 1:5, 2:3], nothing, -1) ==
[[collect(1:9)], [1:5; fill(-1, 4)], [2:3; fill(-1, 7)]]
@test trunc_or_pad(Any[Any[[0,0], 1:10], [1]], 7, -1) ==
[[[0; 0; fill(-1,5)], collect(1:7)], [1; fill(-1,6)]]
@test trunc_or_pad(Any[Any[Any[0,1,2]]], 5, 0) == [[[0,1,2,0,0]]]
end
@testset "trunc=tail pad=head" begin
x = collect(1:9)
@test trunc_or_pad(x, 5, 0, :tail, :head) == collect(1:5)
@test trunc_or_pad(1:3, 5, 0, :tail, :head) == [0; 0; 1:3]
@test trunc_or_pad(x, nothing, 0, :tail, :head) == collect(1:9)
@test trunc_or_pad(1:3, nothing, 0, :tail, :head) == collect(1:3)
@test trunc_or_pad(5, 0, :tail, :head)(x) == collect(1:5)
@test trunc_or_pad(5, 0, :tail, :head)(1:3) == [0; 0; 1:3]
@test trunc_or_pad(nothing, 0, :tail, :head)(x) == collect(1:9)
@test trunc_or_pad(nothing, 0, :tail, :head)(1:3) == collect(1:3)
@test trunc_or_pad(AbstractVector[[x], 1:5, 2:3], 7, -1, :tail, :head) ==
[[collect(1:7)], [-1; -1; 1:5], [fill(-1, 5); 2:3]]
@test trunc_or_pad(AbstractVector[[x], 1:5, 2:3], nothing, -1, :tail, :head) ==
[[collect(1:9)], [fill(-1, 4); 1:5], [fill(-1, 7); 2:3]]
@test trunc_or_pad(Any[Any[[0,0], 1:10], [1]], 7, -1, :tail, :head) ==
[[[fill(-1,5); 0; 0], collect(1:7)], [fill(-1,6); 1]]
@test trunc_or_pad(Any[Any[Any[0,1,2]]], 5, 0, :tail, :head) == [[[0,0,0,1,2]]]
end
@testset "trunc=head pad=tail" begin
x = collect(1:9)
@test trunc_or_pad(x, 5, 0, :head, :tail) == collect(5:9)
@test trunc_or_pad(1:3, 5, 0, :head, :tail) == [1:3; 0; 0]
@test trunc_or_pad(x, nothing, 0, :head, :tail) == collect(1:9)
@test trunc_or_pad(1:3, nothing, 0, :head, :tail) == collect(1:3)
@test trunc_or_pad(5, 0, :head, :tail)(x) == collect(5:9)
@test trunc_or_pad(5, 0, :head, :tail)(1:3) == [1:3; 0; 0]
@test trunc_or_pad(nothing, 0, :head, :tail)(x) == collect(1:9)
@test trunc_or_pad(nothing, 0, :head, :tail)(1:3) == collect(1:3)
@test trunc_or_pad(AbstractVector[[x], 1:5, 2:3], 7, -1, :head, :tail) ==
[[collect(3:9)], [1:5; -1; -1], [2:3; fill(-1, 5)]]
@test trunc_or_pad(AbstractVector[[x], 1:5, 2:3], nothing, -1, :head, :tail) ==
[[collect(1:9)], [1:5; fill(-1, 4)], [2:3; fill(-1, 7)]]
@test trunc_or_pad(Any[Any[[0,0], 1:10], [1]], 7, -1, :head, :tail) ==
[[[0; 0; fill(-1,5)], collect(4:10)], [1; fill(-1,6)]]
@test trunc_or_pad(Any[Any[Any[0,1,2]]], 5, 0, :head, :tail) == [[[0,1,2,0,0]]]
end
@testset "trunc=head pad=head" begin
x = collect(1:9)
@test trunc_or_pad(x, 5, 0, :head, :head) == collect(5:9)
@test trunc_or_pad(1:3, 5, 0, :head, :head) == [0; 0; 1:3]
@test trunc_or_pad(x, nothing, 0, :head, :head) == collect(1:9)
@test trunc_or_pad(1:3, nothing, 0, :head, :head) == collect(1:3)
@test trunc_or_pad(5, 0, :head, :head)(x) == collect(5:9)
@test trunc_or_pad(5, 0, :head, :head)(1:3) == [0; 0; 1:3]
@test trunc_or_pad(nothing, 0, :head, :head)(x) == collect(1:9)
@test trunc_or_pad(nothing, 0, :head, :head)(1:3) == collect(1:3)
@test trunc_or_pad(AbstractVector[[x], 1:5, 2:3], 7, -1, :head, :head) ==
[[collect(3:9)], [-1; -1; 1:5], [fill(-1, 5); 2:3]]
@test trunc_or_pad(AbstractVector[[x], 1:5, 2:3], nothing, -1, :head, :head) ==
[[collect(1:9)], [fill(-1, 4); 1:5], [fill(-1, 7); 2:3]]
@test trunc_or_pad(Any[Any[[0,0], 1:10], [1]], 7, -1, :head, :head) ==
[[[fill(-1,5); 0; 0], collect(4:10)], [fill(-1,6); 1]]
@test trunc_or_pad(Any[Any[Any[0,1,2]]], 5, 0, :head, :head) == [[[0,0,0,1,2]]]
end
end
@testset "trunc_and_pad" begin
@testset "trunc=tail pad=tail" begin
x = collect(1:9)
@test trunc_and_pad(x, 5, 0) == collect(1:5)
@test trunc_and_pad(1:3, 5, 0) == [1:3;]
@test trunc_and_pad(x, nothing, 0) == collect(1:9)
@test trunc_and_pad(1:3, nothing, 0) == collect(1:3)
@test trunc_and_pad(5, 0)(x) == collect(1:5)
@test trunc_and_pad(5, 0)(1:3) == [1:3;]
@test trunc_and_pad(nothing, 0)(x) == collect(1:9)
@test trunc_and_pad(nothing, 0)(1:3) == collect(1:3)
@test trunc_and_pad(AbstractVector[[x], 1:5, 2:3], 7, -1) ==
[[collect(1:7)], [1:5; -1; -1], [2:3; fill(-1, 5)]]
@test trunc_and_pad(AbstractVector[[x], 1:5, 2:3], nothing, -1) ==
[[collect(1:9)], [1:5; fill(-1, 4)], [2:3; fill(-1, 7)]]
@test trunc_and_pad(Any[Any[[0,0], 1:10], [1]], 7, -1) ==
[[[0; 0; fill(-1,5)], collect(1:7)], [1; fill(-1,6)]]
@test trunc_and_pad(Any[Any[Any[0,1,2]]], 5, 0) == [[[0,1,2]]]
end
@testset "trunc=tail pad=head" begin
x = collect(1:9)
@test trunc_and_pad(x, 5, 0, :tail, :head) == collect(1:5)
@test trunc_and_pad(1:3, 5, 0, :tail, :head) == [1:3;]
@test trunc_and_pad(x, nothing, 0, :tail, :head) == collect(1:9)
@test trunc_and_pad(1:3, nothing, 0, :tail, :head) == collect(1:3)
@test trunc_and_pad(5, 0, :tail, :head)(x) == collect(1:5)
@test trunc_and_pad(5, 0, :tail, :head)(1:3) == [1:3;]
@test trunc_and_pad(nothing, 0, :tail, :head)(x) == collect(1:9)
@test trunc_and_pad(nothing, 0, :tail, :head)(1:3) == collect(1:3)
@test trunc_and_pad(AbstractVector[[x], 1:5, 2:3], 7, -1, :tail, :head) ==
[[collect(1:7)], [-1; -1; 1:5], [fill(-1, 5); 2:3]]
@test trunc_and_pad(AbstractVector[[x], 1:5, 2:3], nothing, -1, :tail, :head) ==
[[collect(1:9)], [fill(-1, 4); 1:5], [fill(-1, 7); 2:3]]
@test trunc_and_pad(Any[Any[[0,0], 1:10], [1]], 7, -1, :tail, :head) ==
[[[fill(-1,5); 0; 0], collect(1:7)], [fill(-1,6); 1]]
@test trunc_and_pad(Any[Any[Any[0,1,2]]], 5, 0, :tail, :head) == [[[0,1,2]]]
end
@testset "trunc=head pad=tail" begin
x = collect(1:9)
@test trunc_and_pad(x, 5, 0, :head, :tail) == collect(5:9)
@test trunc_and_pad(1:3, 5, 0, :head, :tail) == [1:3;]
@test trunc_and_pad(x, nothing, 0, :head, :tail) == collect(1:9)
@test trunc_and_pad(1:3, nothing, 0, :head, :tail) == collect(1:3)
@test trunc_and_pad(5, 0, :head, :tail)(x) == collect(5:9)
@test trunc_and_pad(5, 0, :head, :tail)(1:3) == [1:3;]
@test trunc_and_pad(nothing, 0, :head, :tail)(x) == collect(1:9)
@test trunc_and_pad(nothing, 0, :head, :tail)(1:3) == collect(1:3)
@test trunc_and_pad(AbstractVector[[x], 1:5, 2:3], 7, -1, :head, :tail) ==
[[collect(3:9)], [1:5; -1; -1], [2:3; fill(-1, 5)]]
@test trunc_and_pad(AbstractVector[[x], 1:5, 2:3], nothing, -1, :head, :tail) ==
[[collect(1:9)], [1:5; fill(-1, 4)], [2:3; fill(-1, 7)]]
@test trunc_and_pad(Any[Any[[0,0], 1:10], [1]], 7, -1, :head, :tail) ==
[[[0; 0; fill(-1,5)], collect(4:10)], [1; fill(-1,6)]]
@test trunc_and_pad(Any[Any[Any[0,1,2]]], 5, 0, :head, :tail) == [[[0,1,2]]]
end
@testset "trunc=head pad=head" begin
x = collect(1:9)
@test trunc_and_pad(x, 5, 0, :head, :head) == collect(5:9)
@test trunc_and_pad(1:3, 5, 0, :head, :head) == [1:3;]
@test trunc_and_pad(x, nothing, 0, :head, :head) == collect(1:9)
@test trunc_and_pad(1:3, nothing, 0, :head, :head) == collect(1:3)
@test trunc_and_pad(5, 0, :head, :head)(x) == collect(5:9)
@test trunc_and_pad(5, 0, :head, :head)(1:3) == [1:3;]
@test trunc_and_pad(nothing, 0, :head, :head)(x) == collect(1:9)
@test trunc_and_pad(nothing, 0, :head, :head)(1:3) == collect(1:3)
@test trunc_and_pad(AbstractVector[[x], 1:5, 2:3], 7, -1, :head, :head) ==
[[collect(3:9)], [-1; -1; 1:5], [fill(-1, 5); 2:3]]
@test trunc_and_pad(AbstractVector[[x], 1:5, 2:3], nothing, -1, :head, :head) ==
[[collect(1:9)], [fill(-1, 4); 1:5], [fill(-1, 7); 2:3]]
@test trunc_and_pad(Any[Any[[0,0], 1:10], [1]], 7, -1, :head, :head) ==
[[[fill(-1,5); 0; 0], collect(4:10)], [fill(-1,6); 1]]
@test trunc_and_pad(Any[Any[Any[0,1,2]]], 5, 0, :head, :head) == [[[0,1,2]]]
end
end
@testset "join_text" begin
@test join_text(["a", "b", "c"]) == "abc"
@test join_text([["a", "b", "c"]]) == ["abc"]
@test join_text([[["a", "b", "c"]]]) == [["abc"]]
@static if VERSION < v"1.8"
x = cat(["a" "d"; "b" "e"; "c" "f"], ["x" "u"; "y" "v"; "z" "w"; ]; dims=3)
else
x = ["a" "d"; "b" "e"; "c" "f";;; "x" "u"; "y" "v"; "z" "w"; ]
end
@test join_text(x, " + ", " = ") == ["a + b = c" "x + y = z"; "d + e = f" "u + v = w"]
@test join_text(x, " + ") == ["a + b + c" "x + y + z"; "d + e + f" "u + v + w"]
@test join_text(x) == ["abc" "xyz"; "def" "uvw"]
end
@testset "nested2batch / batch2nested" begin
x = randn(5,4,3,2)
x_slices = [x[i:i+5-1] for i in 1:5:length(x)]
y = [[[x_slices[1],x_slices[2],x_slices[3],x_slices[4]],
[x_slices[5],x_slices[6],x_slices[7],x_slices[8]],
[x_slices[9],x_slices[10],x_slices[11],x_slices[12]],],
[[x_slices[13],x_slices[14],x_slices[15],x_slices[16]],
[x_slices[17],x_slices[18],x_slices[19],x_slices[20]],
[x_slices[21],x_slices[22],x_slices[23],x_slices[24]],]]
y2 = [[cat(x_slices[1],x_slices[2],x_slices[3],x_slices[4], dims=2),
cat(x_slices[5],x_slices[6],x_slices[7],x_slices[8], dims=2),
cat(x_slices[9],x_slices[10],x_slices[11],x_slices[12], dims=2),],
[cat(x_slices[13],x_slices[14],x_slices[15],x_slices[16], dims=2),
cat(x_slices[17],x_slices[18],x_slices[19],x_slices[20], dims=2),
cat(x_slices[21],x_slices[22],x_slices[23],x_slices[24], dims=2),]]
y3 = [cat(cat(x_slices[1],x_slices[2],x_slices[3],x_slices[4], dims=2),
cat(x_slices[5],x_slices[6],x_slices[7],x_slices[8], dims=2),
cat(x_slices[9],x_slices[10],x_slices[11],x_slices[12], dims=2), dims=3),
cat(cat(x_slices[13],x_slices[14],x_slices[15],x_slices[16], dims=2),
cat(x_slices[17],x_slices[18],x_slices[19],x_slices[20], dims=2),
cat(x_slices[21],x_slices[22],x_slices[23],x_slices[24], dims=2), dims=3)]
y4 = Any[Any[Any[x_slices[1],x_slices[2],x_slices[3],x_slices[4]],
Any[x_slices[5],x_slices[6],x_slices[7],x_slices[8]],
Any[x_slices[9],x_slices[10],x_slices[11],x_slices[12]],],
Any[Any[x_slices[13],x_slices[14],x_slices[15],x_slices[16]],
Any[x_slices[17],x_slices[18],x_slices[19],x_slices[20]],
Any[x_slices[21],x_slices[22],x_slices[23],x_slices[24]],]]
x_slices_any = [Array{Any}(x[i:i+5-1]) for i in 1:5:length(x)]
y5 = [[[x_slices_any[1],x_slices_any[2],x_slices_any[3],x_slices_any[4]],
[x_slices_any[5],x_slices_any[6],x_slices_any[7],x_slices_any[8]],
[x_slices_any[9],x_slices_any[10],x_slices_any[11],x_slices_any[12]],],
[[x_slices_any[13],x_slices_any[14],x_slices_any[15],x_slices_any[16]],
[x_slices_any[17],x_slices_any[18],x_slices_any[19],x_slices_any[20]],
[x_slices_any[21],x_slices_any[22],x_slices_any[23],x_slices_any[24]],]]
@test nested2batch(y) == x
@test nested2batch(y2) == x
@test nested2batch(y3) == x
@test nested2batch(y4) == x
@test nested2batch(y5) == x
@test nested2batch(batch2nested(x)) == x
@test_throws DimensionMismatch nested2batch([[1:5], 2:6])
end
@testset "SequenceTemplate" begin
x = collect(1:5)
head_tail_template = SequenceTemplate(ConstTerm(-1), InputTerm{Int}(), ConstTerm(-2))
@test head_tail_template(x)[1] == with_head_tail(x, -1, -2)
@test head_tail_template(AbstractVector[[x], [1:5], [2:3]])[1] ==
map(x->x[1], with_head_tail(AbstractVector[[x], [1:5], [2:3]], -1, -2))
@test head_tail_template(Any[Any[x], [1:5], [2:3]])[1] ==
map(x->x[1], with_head_tail(Any[Any[x], [1:5], [2:3]], -1, -2))
@test head_tail_template(Any[Any[Any[0,1,2]]])[1] ==
with_head_tail(Any[Any[Any[0,1,2]]], -1, -2)[1]
@test_throws MethodError head_tail_template(Any[Any[x], 1:5, 2:3])
    @test_throws Exception head_tail_template(Any[1:5, 2:3])
bert_template = SequenceTemplate(
ConstTerm("[CLS]", 1), InputTerm{String}(1), ConstTerm("[SEP]", 1),
RepeatedTerm(InputTerm{String}(2), ConstTerm("[SEP]", 2))
)
@test bert_template(["A"]) == (["[CLS]", "A", "[SEP]"], [1,1,1])
@test bert_template(["A"], ["B"]) == (["[CLS]", "A", "[SEP]", "B", "[SEP]"], [1,1,1,2,2])
@test bert_template(["A"], ["B"], ["C"]) ==
(["[CLS]", "A", "[SEP]", "B", "[SEP]", "C", "[SEP]"], [1,1,1,2,2,2,2])
@test bert_template([["A"], ["B"]]) == (["[CLS]", "A", "[SEP]", "B", "[SEP]"], [1,1,1,2,2])
@test bert_template(Val(1), ["A"], ["B"]) == ["[CLS]", "A", "[SEP]", "B", "[SEP]"]
@test bert_template(Val(2), [["A"], ["B"]]) == [1,1,1,2,2]
@test bert_template(Val(-1), Any["A"]) == nothing
@test bert_template([[["A"], ["B"]]]) == ([["[CLS]", "A", "[SEP]", "B", "[SEP]"]], [[1,1,1,2,2]])
@test bert_template([[[["A"], ["B"]]]]) == ([[["[CLS]", "A", "[SEP]", "B", "[SEP]"]]], [[[1,1,1,2,2]]])
@test bert_template(Val(1), [[["A"], ["B"]]]) == [["[CLS]", "A", "[SEP]", "B", "[SEP]"]]
@test bert_template(Val(2), [[["A"], ["B"]]]) == [[1,1,1,2,2]]
@test bert_template(Val(-1), [[[["A"], ["B"]]]]) == nothing
@test bert_template(Any[[["A"], ["B"]]]) == ([["[CLS]", "A", "[SEP]", "B", "[SEP]"]], [[1,1,1,2,2]])
@test bert_template([Any[["A"], ["B"]]]) == ([["[CLS]", "A", "[SEP]", "B", "[SEP]"]], [[1,1,1,2,2]])
@test bert_template([Any[Any["A"], Any["B"]]]) == ([["[CLS]", "A", "[SEP]", "B", "[SEP]"]], [[1,1,1,2,2]])
@test bert_template(Any[Any[Any["A"], Any["B"]]]) == ([["[CLS]", "A", "[SEP]", "B", "[SEP]"]], [[1,1,1,2,2]])
@test bert_template([Any[Any[Any["A"], Any["B"]]]]) ==
([[["[CLS]", "A", "[SEP]", "B", "[SEP]"]]], [[[1,1,1,2,2]]])
@test bert_template(Val(-1), [Any[Any[Any["A"], Any["B"]]]]) == nothing
bert_template2 = SequenceTemplate(
ConstTerm("[CLS]", 1), InputTerm{String}(1), ConstTerm("[SEP]", 1),
RepeatedTerm(InputTerm{String}(2), ConstTerm("[SEP]", 2); dynamic_type_id = true)
)
@test bert_template2(["A"]) == (["[CLS]", "A", "[SEP]"], [1,1,1])
@test bert_template2(["A"], ["B"]) == (["[CLS]", "A", "[SEP]", "B", "[SEP]"], [1,1,1,2,2])
@test bert_template2(["A"], ["B"], ["C"]) ==
(["[CLS]", "A", "[SEP]", "B", "[SEP]", "C", "[SEP]"], [1,1,1,2,2,3,3])
trail_template = SequenceTemplate(
IndexInputTerm{Int}(1, 1), RepeatedTerm(InputTerm{Int}(2)), IndexInputTerm{Int}(1, 1)
)
@test trail_template([3,5]) == ([3,5,3,5], [1,1,1,1])
@test trail_template([3,5],[1,2,4]) == ([3,5,1,2,4,3,5], [1,1,2,2,2,1,1])
@test SequenceTemplate(RepeatedTerm(InputTerm{Int}(3); dynamic_type_id = 2))(1:1, 2:2) == ([1,2],[3,5])
multi_repeat_template = SequenceTemplate(
ConstTerm(0,1),
RepeatedTerm(InputTerm{Int}(3), ConstTerm(1, 5), InputTerm{Int}(7); dynamic_type_id = 2),
ConstTerm(0,9)
)
@test multi_repeat_template() == ([0,0],[1,9])
@test_throws AssertionError multi_repeat_template(1:2)
@test multi_repeat_template(1:2, 3:4) == ([0,1,2,1,3,4,0], [1,3,3,5,7,7,9])
@test_throws AssertionError multi_repeat_template(1:2,3:4,5:6)
@test multi_repeat_template(1:2, 3:4,5:6,7:8) == ([0,1,2,1,3,4,5,6,1,7,8,0], [1,3,3,5,7,7,5,5,7,9,9,9])
@test sprint(show, bert_template2) ==
"SequenceTemplate{String}([CLS]:<type=1> Input:<type=1> [SEP]:<type=1> (Input:<type=2> [SEP]:<type=2>)<type+=1>...)"
@test sprint(show, trail_template) ==
"SequenceTemplate{Int64}(Input[1]:<type=1> (Input:<type=2>)... Input[1]:<type=1>)"
end
end
@testset "Encoder" begin
sentence = Sentence("A single sentence with 31 char.")
tkr = NestedTokenizer(IndexedTokenization(CharTk()))
vocab = Vocab(map(string, ['a':'z'; 'A':'Z']))
enc = TextEncoder(tkr, vocab, nested2batch∘nestedcall(getvalue))
s(x) = mapfoldl(y->split(y,""), append!, split(x); init=String[])
@test encode(enc, sentence) == reshape(lookup(OneHot, vocab, s(sentence.x)), Val(3))
@test decode(enc, encode(enc, sentence)) == lookup(vocab, reshape(lookup(OneHot, vocab, s(sentence.x)), Val(3)))
enc2 = TextEncoder(tkr, vocab) do e
nested2batch∘TextEncodeBase.process(e)
end
@test enc == enc2
end
end
| TextEncodeBase | https://github.com/chengchingwen/TextEncodeBase.jl.git |
|
[
"MIT"
] | 0.8.3 | 66f827fa54c38cb7a7b174d3a580075b10793f5a | docs | 10958 | # TextEncodeBase
[](https://chengchingwen.github.io/TextEncodeBase.jl/stable)
[](https://chengchingwen.github.io/TextEncodeBase.jl/dev)
[](https://github.com/chengchingwen/TextEncodeBase.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://codecov.io/gh/chengchingwen/TextEncodeBase.jl)
An api for encoding text, built on top of [WordTokenizers.jl](https://github.com/JuliaText/WordTokenizers.jl).
It provides a framework to easily define custom methods to convert strings into indices.
# Usages
Here are some explanations and examples for using `TextEncodeBase.jl`. You can also find more information
in the [docs](https://chengchingwen.github.io/TextEncodeBase.jl/dev) or [test](/test/runtests.jl)
## Vocabulary
The vocabulary part contains only two api, the `Vocab` struct and the `lookup` function.
The `lookup` function is bidirectional (convert string to indices and back).
```julia
julia> vocab = Vocab(["a", "b", "c", "a", "b", "c"])
Vocab{String, StaticArrays.SizedVector{3, String, Vector{String}}}(size = 3, unk = [UNK], unki = 0)
julia> vocab_unk = Vocab(["a", "b", "xxx"], "xxx")
Vocab{String, StaticArrays.SizedVector{3, String, Vector{String}}}(size = 3, unk = xxx, unki = 3)
julia> lookup(vocab, "b")
2
julia> lookup(vocab, "d")
0
julia> lookup(vocab_unk, "d")
3
julia> lookup(vocab, 1)
"a"
julia> lookup(vocab, 10000)
"[UNK]"
julia> lookup(vocab_unk, 10000)
"xxx"
julia> lookup(vocab, ["b", "c", "a", "A", "[UNK]"])
5-element Vector{Int64}:
2
3
1
0
0
julia> lookup(OneHot, vocab, "a")
3-element OneHot{3}:
1
0
0
julia> lookup(OneHot, vocab, 3)
ERROR: DomainError with c:
cannot convert `lookup(::Vocab, 3)` = "c" into one-hot representation.
Stacktrace:
[...]
julia> oha = lookup(OneHot, vocab, ["a" "b"; "c" "d"])
3x2x2 OneHotArray{3, 3, Matrix{OneHot{0x00000003}}}:
[:, :, 1] =
1 0
0 0
0 1
[:, :, 2] =
0 0
1 0
0 0
julia> lookup(vocab, oha)
2×2 Matrix{String}:
"a" "b"
"c" "[UNK]"
```
## Pipelines
*Reexport from [FuncPipelines.jl](https://github.com/chengchingwen/FuncPipelines.jl)*
The Pipeline api helps you define a series of functions that can easily be decomposed and then combined with
other functions to form a new pipeline. A function (`Pipeline`) is tagged with one (or multiple) `Symbol`s.
The return values of that `Pipeline` will be bound to those symbols and stored in a `NamedTuple`. Precisely,
a `Pipeline` takes two inputs, a regular input value (`source`) and a `NamedTuple` (`target`) that stores
the results; it applies the function to them, and then stores the result, with the name it carries, into `target`.
We can then chain multiple `Pipeline`s into a `Pipelines`. For example:
```julia
julia> pipes = Pipeline{:x}(identity, 1) |> Pipeline{(:sinx, :cosx)}((x,y)->sincos(x))
julia> pipes(0.3)
(x = 0.3, sinx = 0.29552020666133955, cosx = 0.955336489125606)
# define a series of function
julia> pipes = Pipeline{:θ}(Base.Fix1(*, 2), 1) |>
Pipeline{(:sinθ, :cosθ)}(sincos, :θ) |>
Pipeline{:tanθ}(2) do target
target.sinθ / target.cosθ
end
Pipelines:
target[θ] := *(2, source)
target[(sinθ, cosθ)] := sincos(target.θ)
target[tanθ] := #68(target)
# get the wanted results
julia> pipes2 = pipes |> PipeGet{(:tanθ, :θ)}()
Pipelines:
target[θ] := *(2, source)
target[(sinθ, cosθ)] := sincos(target.θ)
target[tanθ] := #68(target)
target := (target.tanθ, target.θ)
julia> pipes2(ℯ)
(tanθ = -1.1306063769531505, θ = 5.43656365691809)
# replace some functions in pipeline
julia> pipes3 = pipes2[1] |> Pipeline{:tanθ}(tan, :θ) |> pipes2[end]
Pipelines:
target[θ] := *(2, source)
target[tanθ] := tan(target.θ)
target := (target.tanθ, target.θ)
julia> pipes3(ℯ)
(tanθ = -1.1306063769531507, θ = 5.43656365691809)
# and the pipelines is type stable
julia> using Test; @inferred pipes3(ℯ)
(tanθ = -1.1306063769531507, θ = 5.43656365691809)
```
## Tokenizer
The tokenizer part is built on top of `WordTokenizers.jl` and provides a high-level api
to control/augment the tokenization. There are some differences from `WordTokenizers.jl`.
`WordTokenizers.jl` provides a set of tokenizers and a low-level api (`TokenBuffer`) for defining
custom tokenizers. It mainly focuses on how to split a sentence into tokens. We, on the other hand,
focus on how to combine different tokenizers or include other information during the tokenization.
For example, sometimes you might want to prevent urls from being split or add some extra tags to them;
these can be done by defining a custom `AbstractTokenizer` and overloading some methods. Besides, we
require the user to explicitly wrap the input as one of the stages (`Document`/`Sentence`/`Word`/...),
so there is no confusion about what the input represents.
### Example of using the Tokenizer api
Here is an example that wrapped the word tokenizer and wordpiece from `Transformers.jl` into our Tokenizer api.
```julia
using Transformers
using Transformers.Pretrain
using Transformers.BidirectionalEncoder: WordPiece, bert_cased_tokenizer
using TextEncodeBase
using TextEncodeBase: NestedTokenizer, BaseTokenization, Sentence, Word, SubWord, getvalue, Splittable
struct BertCasedTokenization <: BaseTokenization
wordpiece::WordPiece
end
# split sentence with `bert_cased_tokenizer` (define with WordTokenizers.jl's `TokenBuffer`)
TextEncodeBase.splitting(::BertCasedTokenization, s::Sentence) = bert_cased_tokenizer(getvalue(s))
# word is splittable with WordPiece
TextEncodeBase.splittability(::BertCasedTokenization, w::Word) = Splittable()
# split word with `WordPiece`
TextEncodeBase.splitting(t::BertCasedTokenization, w::Word) = t.wordpiece(getvalue(w))
tokenizer = pretrain"bert-cased_L-12_H-768_A-12:tokenizer" # this is just `bert_cased_tokenizer`
wordpiece = pretrain"bert-cased_L-12_H-768_A-12:wordpiece"
tkr = NestedTokenizer(BertCasedTokenization(wordpiece))
text1 = "Peter Piper picked a peck of pickled peppers"
single_without_TEB = text1 |> tokenizer |> wordpiece
single_with_TEB = tkr(Sentence(text1))
# `NestedTokenizer` returns a vector of vectors
@assert single_without_TEB == map(getvalue, single_with_TEB[])
julia> single_without_TEB
11-element Vector{String}:
"Peter"
"Piper"
"picked"
"a"
"p"
"##eck"
"of"
"pick"
"##led"
"pepper"
"##s"
julia> single_with_TEB
1-element Vector{Vector{TextEncodeBase.TokenStage}}:
[Token("Peter"), Token("Piper"), Token("picked"), Token("a"), Token("p"), Token("##eck"), Token("of"), Token("pick"), Token("##led"), Token("pepper"), Token("##s")]
julia> single_without_TEB == map(getvalue, single_with_TEB[])
true
# define stage for batch of data
# equivalent to TextEncodeBase.@stage BatchSentence{A<:AbstractVector, M} DocumentStage
struct BatchSentence{A<:AbstractVector, M} <: TextEncodeBase.DocumentStage
x::A
meta::M
end
BatchSentence(x) = BatchSentence(x, nothing)
TextEncodeBase.setmeta(x::BatchSentence, meta) = BatchSentence(x.x, meta)
TextEncodeBase.setvalue(x::BatchSentence, y) = BatchSentence(y, x.meta)
# splittability and split behavior for `BatchSentence`
TextEncodeBase.splittability(::BertCasedTokenization, ::BatchSentence) = Splittable()
TextEncodeBase.splitting(::BertCasedTokenization, s::BatchSentence) = s.x
text2 = "Fuzzy Wuzzy was a bear"
texts = [text1, text2]
batch_without_TEB = map(wordpiece∘tokenizer, texts)
batch_with_TEB = tkr(BatchSentence(texts))
@assert batch_without_TEB == TextEncodeBase.nestedcall(getvalue, batch_with_TEB)
julia> batch_without_TEB
2-element Vector{Vector{String}}:
["Peter", "Piper", "picked", "a", "p", "##eck", "of", "pick", "##led", "pepper", "##s"]
["Fu", "##zzy", "Wu", "##zzy", "was", "a", "bear"]
julia> batch_with_TEB
2-element Vector{Vector{TextEncodeBase.TokenStage}}:
[Token("Peter"), Token("Piper"), Token("picked"), Token("a"), Token("p"), Token("##eck"), Token("of"), Token("pick"), Token("##led"), Token("pepper"), Token("##s")]
[Token("Fu"), Token("##zzy"), Token("Wu"), Token("##zzy"), Token("was"), Token("a"), Token("bear")]
julia> batch_without_TEB == TextEncodeBase.nestedcall(getvalue, batch_with_TEB)
true
```
Since the wordpiece break word into subword, we might want to know which word each subword belongs to:
```julia
julia> itkr = NestedTokenizer(TextEncodeBase.IndexedTokenization(BertCasedTokenization(wordpiece)));
julia> ibatch_with_TEB = itkr(BatchSentence(texts));
# subword from same word having the same `word_id`
julia> ibatch_with_TEB[1]
11-element Vector{TextEncodeBase.TokenStage}:
Token("Peter", (sentence_id = 1, word_id = 1, token_id = 1))
Token("Piper", (sentence_id = 1, word_id = 2, token_id = 2))
Token("picked", (sentence_id = 1, word_id = 3, token_id = 3))
Token("a", (sentence_id = 1, word_id = 4, token_id = 4))
Token("p", (sentence_id = 1, word_id = 5, token_id = 5))
Token("##eck", (sentence_id = 1, word_id = 5, token_id = 6))
Token("of", (sentence_id = 1, word_id = 6, token_id = 7))
Token("pick", (sentence_id = 1, word_id = 7, token_id = 8))
Token("##led", (sentence_id = 1, word_id = 7, token_id = 9))
Token("pepper", (sentence_id = 1, word_id = 8, token_id = 10))
Token("##s", (sentence_id = 1, word_id = 8, token_id = 11))
julia> ibatch_with_TEB[2]
7-element Vector{TextEncodeBase.TokenStage}:
Token("Fu", (sentence_id = 2, word_id = 1, token_id = 1))
Token("##zzy", (sentence_id = 2, word_id = 1, token_id = 2))
Token("Wu", (sentence_id = 2, word_id = 2, token_id = 3))
Token("##zzy", (sentence_id = 2, word_id = 2, token_id = 4))
Token("was", (sentence_id = 2, word_id = 3, token_id = 5))
Token("a", (sentence_id = 2, word_id = 4, token_id = 6))
Token("bear", (sentence_id = 2, word_id = 5, token_id = 7))
```
## TextEncoder
The text encoder is just a combination of vocabulary and tokenizer. We also
provide some helper functions (`with_head_tail`/`nested2batch`/...) for
transforming the tokenizer result into a `lookup`-able format.
### Example
```julia
using TextEncodeBase: nestedcall, with_head_tail, trunc_and_pad, nested2batch
# construct `Vocab` with `WordPiece`
vocab = Vocab(wordpiece.vocab, wordpiece.vocab[wordpiece.unk_idx])
# define encoder with `TextEncoder`
enc = TextEncoder(
itkr, vocab,
nested2batch ∘ trunc_and_pad(nothing, vocab.unk) ∘ with_head_tail("[CLS]", "[SEP]") ∘ nestedcall(getvalue)
)
julia> encode(enc, BatchSentence(texts))
28996x13x2 OneHotArray{28996, 3, Matrix{OneHot{0x00007144}}}:
[...]
julia> decode(enc, ans)
13×2 Matrix{String}:
"[CLS]" "[CLS]"
"Peter" "Fu"
"Piper" "##zzy"
"picked" "Wu"
"a" "##zzy"
"p" "was"
"##eck" "a"
"of" "bear"
"pick" "[SEP]"
"##led" "[UNK]"
"pepper" "[UNK]"
"##s" "[UNK]"
"[SEP]" "[UNK]"
```
| TextEncodeBase | https://github.com/chengchingwen/TextEncodeBase.jl.git |
|
[
"MIT"
] | 0.8.3 | 66f827fa54c38cb7a7b174d3a580075b10793f5a | docs | 76 | # Api reference
```@index
```
```@autodocs
Modules = [TextEncodeBase]
```
| TextEncodeBase | https://github.com/chengchingwen/TextEncodeBase.jl.git |
|
[
"MIT"
] | 0.8.3 | 66f827fa54c38cb7a7b174d3a580075b10793f5a | docs | 2732 | # Design
## Tokenizer
The overall tokenizer framework is built on top of Julia's multiple dispatch.
The main idea of the design is to make hijacking the tokenization process easier.
This is done by dispatching on all of `AbstractTokenizer`, `AbstractTokenization`, and
`TokenStages`, so that even if the tokenization and input are the same, we can still
define a new tokenizer and change the behavior of some parts of that tokenization.
### TokenStages
`TokenStages` is an abstract type used to specify the input. For example, we have
`Document <: TokenStages` and `Sentence <: TokenStages`, so the input is not just a
`String`, whose content we cannot inspect. Every string should be wrapped
in a `TokenStages` type explicitly. With the stages in mind, we can view the
tokenization process as recursively splitting the string and wrapping each substring
as another stage, until the result is a `Token` type.
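
For example, a minimal sketch of the explicit wrapping (stage constructors as used in the usage docs):

```julia
using TextEncodeBase: Document, Sentence, Word

doc = Document("Peter Piper picked a peck of pickled peppers. Fuzzy Wuzzy was a bear.")
sent = Sentence("Peter Piper picked a peck of pickled peppers")
word = Word("peppers")
```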
### Splittability
Not every `TokenStages` can be split into substrings; for example, most tokenizers won't split
a word into subwords. Therefore, we defined the `Splittability` trait. The splittability is
codetermined by `AbstractTokenizer`, `AbstractTokenization`, and `TokenStages`. It is either
`Splittable` or `UnSplittable`. If the input is splittable, there should be a `splitting`
method defined for that combination. On the other hand, if it's unsplittable, the tokenize
function will directly call `wrap` to transform the input into the next stage. There is
also another input type (`ParentStages = Union{Nothing, TokenStages}`) that can be used
to find whether the tokenize function is called recursively.
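
A hedged sketch of how a combination opts in to splitting (the `MyTokenization` type is hypothetical; the pattern mirrors the wordpiece example in the usage docs):

```julia
using TextEncodeBase: BaseTokenization, Word, Splittable, getvalue

struct MyTokenization <: BaseTokenization end

# declare that words are splittable under this tokenization ...
TextEncodeBase.splittability(::MyTokenization, ::Word) = Splittable()
# ... and define how they split
TextEncodeBase.splitting(::MyTokenization, w::Word) = split(getvalue(w), "-")
```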
## Vocabulary
The `Vocab` type takes two arguments: the list of words and a special token for all unknown words.
The default constructor of `Vocab` copies the list and removes all duplicate words. Besides, it
also tries to find the unknown token in the word list. If the unknown token is *NOT* in the word list,
it will *NOT* be added to the word list. Instead, when looking up an unknown word with that `Vocab` object,
it will return 0 as the index for all unknown words. Therefore, make sure the unknown token is in the
word list beforehand.
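
For instance (mirroring the usage examples):

```julia
vocab = Vocab(["a", "b", "c"])               # "[UNK]" is not in the list, so unki = 0
lookup(vocab, "d")                           # returns 0 for unknown words

vocab_unk = Vocab(["a", "b", "xxx"], "xxx")  # unknown token is in the list, so unki = 3
lookup(vocab_unk, "d")                       # returns 3
```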
## Pipelines
A `Pipelines` is a chain of `Pipeline`s, and a `Pipeline` is a function that takes two arguments:
the input and a `NamedTuple`. Each `Pipeline` is attached with one or more symbols. It applies
a transform function to its arguments, and then the result is marked with those symbols,
producing another `NamedTuple`. After that, the result `NamedTuple` is merged into the input
`NamedTuple`. So a `Pipelines` is a sequence of transform functions, where each transform result is
marked with the given names.
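
A small example (taken from the usage docs):

```julia
pipes = Pipeline{:x}(identity, 1) |> Pipeline{(:sinx, :cosx)}((x, y) -> sincos(x))
pipes(0.3)  # (x = 0.3, sinx = 0.29552020666133955, cosx = 0.955336489125606)
```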
| TextEncodeBase | https://github.com/chengchingwen/TextEncodeBase.jl.git |
|
[
"MIT"
] | 0.8.3 | 66f827fa54c38cb7a7b174d3a580075b10793f5a | docs | 10382 | ```@meta
CurrentModule = TextEncodeBase
```
# TextEncodeBase
Documentation for [TextEncodeBase](https://github.com/chengchingwen/TextEncodeBase.jl).
An api for encoding text, built on top of [WordTokenizers.jl](https://github.com/JuliaText/WordTokenizers.jl).
It provides a framework to easily define custom methods to convert strings into indices.
# Usages
Here are some explanations and examples for using `TextEncodeBase.jl`. You can also find more information
in the [test](https://github.com/chengchingwen/TextEncodeBase.jl/blob/main/test/runtests.jl)
## Vocabulary
The vocabulary part contains only two api, the `Vocab` struct and the `lookup` function.
The `lookup` function is bidirectional (convert string to indices and back).
```julia
julia> vocab = Vocab(["a", "b", "c", "a", "b", "c"])
Vocab{String, StaticArrays.SizedVector{3, String, Vector{String}}}(size = 3, unk = [UNK], unki = 0)
julia> vocab_unk = Vocab(["a", "b", "xxx"], "xxx")
Vocab{String, StaticArrays.SizedVector{3, String, Vector{String}}}(size = 3, unk = xxx, unki = 3)
julia> lookup(vocab, "b")
2
julia> lookup(vocab, "d")
0
julia> lookup(vocab_unk, "d")
3
julia> lookup(vocab, 1)
"a"
julia> lookup(vocab, 10000)
"[UNK]"
julia> lookup(vocab_unk, 10000)
"xxx"
julia> lookup(vocab, ["b", "c", "a", "A", "[UNK]"])
5-element Vector{Int64}:
2
3
1
0
0
julia> lookup(OneHot, vocab, "a")
3-element OneHot{3}:
1
0
0
julia> lookup(OneHot, vocab, 3)
ERROR: DomainError with c:
cannot convert `lookup(::Vocab, 3)` = "c" into one-hot representation.
Stacktrace:
[...]
julia> oha = lookup(OneHot, vocab, ["a" "b"; "c" "d"])
3x2x2 OneHotArray{3, 3, Matrix{OneHot{0x00000003}}}:
[:, :, 1] =
1 0
0 0
0 1
[:, :, 2] =
0 0
1 0
0 0
julia> lookup(vocab, oha)
2×2 Matrix{String}:
"a" "b"
"c" "[UNK]"
```
## Pipelines
The Pipeline api helps you define a series of functions that can easily be decomposed and then combined with
other functions to form a new pipeline. A function (`Pipeline`) is tagged with one (or multiple) `Symbol`s.
The return values of that `Pipeline` will be bound to those symbols and stored in a `NamedTuple`. Precisely,
a `Pipeline` takes two inputs, a regular input value (`source`) and a `NamedTuple` (`target`) that stores
the results; it applies the function to them, and then stores the result, with the name it carries, into `target`.
We can then chain multiple `Pipeline`s into a `Pipelines`. For example:
```julia
julia> pipes = Pipeline{:x}(identity, 1) |> Pipeline{(:sinx, :cosx)}((x,y)->sincos(x))
julia> pipes(0.3)
(x = 0.3, sinx = 0.29552020666133955, cosx = 0.955336489125606)
# define a series of function
julia> pipes = Pipeline{:θ}(Base.Fix1(*, 2), 1) |>
Pipeline{(:sinθ, :cosθ)}(sincos, :θ) |>
Pipeline{:tanθ}(2) do target
target.sinθ / target.cosθ
end
Pipelines:
target[θ] := *(2, source)
target[(sinθ, cosθ)] := sincos(target.θ)
target[tanθ] := #68(target)
# get the wanted results
julia> pipes2 = pipes |> PipeGet{(:tanθ, :θ)}()
Pipelines:
target[θ] := *(2, source)
target[(sinθ, cosθ)] := sincos(target.θ)
target[tanθ] := #68(target)
target := (target.tanθ, target.θ)
julia> pipes2(ℯ)
(tanθ = -1.1306063769531505, θ = 5.43656365691809)
# replace some functions in pipeline
julia> pipes3 = pipes2[1] |> Pipeline{:tanθ}(tan, :θ) |> pipes2[end]
Pipelines:
target[θ] := *(2, source)
target[tanθ] := tan(target.θ)
target := (target.tanθ, target.θ)
julia> pipes3(ℯ)
(tanθ = -1.1306063769531507, θ = 5.43656365691809)
# and the pipelines is type stable
julia> using Test; @inferred pipes3(ℯ)
(tanθ = -1.1306063769531507, θ = 5.43656365691809)
```
## Tokenizer
The tokenizer part is built on top of `WordTokenizers.jl` and provides a high-level api
to control/augment the tokenization. There are some differences from `WordTokenizers.jl`.
`WordTokenizers.jl` provides a set of tokenizers and a low-level api (`TokenBuffer`) for defining
custom tokenizers. It mainly focuses on how to split a sentence into tokens. We, on the other hand,
focus on how to combine different tokenizers or include other information during the tokenization.
For example, sometimes you might want to prevent urls from being split or add some extra tags to them;
these can be done by defining a custom `AbstractTokenizer` and overloading some methods. Besides, we
require the user to explicitly wrap the input as one of the stages (`Document`/`Sentence`/`Word`/...),
so there is no confusion about what the input represents.
### Example of using the Tokenizer api
Here is an example that wrapped the word tokenizer and wordpiece from `Transformers.jl` into our Tokenizer api.
```julia
using Transformers
using Transformers.Pretrain
using Transformers.BidirectionalEncoder: WordPiece, bert_cased_tokenizer
using TextEncodeBase
using TextEncodeBase: NestedTokenizer, BaseTokenization, Sentence, Word, SubWord, getvalue, Splittable
struct BertCasedTokenization <: BaseTokenization
wordpiece::WordPiece
end
# split sentence with `bert_cased_tokenizer` (define with WordTokenizers.jl's `TokenBuffer`)
TextEncodeBase.splitting(::BertCasedTokenization, s::Sentence) = bert_cased_tokenizer(getvalue(s))
# word is splittable with WordPiece
TextEncodeBase.splittability(::BertCasedTokenization, w::Word) = Splittable()
# split word with `WordPiece`
TextEncodeBase.splitting(t::BertCasedTokenization, w::Word) = t.wordpiece(getvalue(w))
tokenizer = pretrain"bert-cased_L-12_H-768_A-12:tokenizer" # this is just `bert_cased_tokenizer`
wordpiece = pretrain"bert-cased_L-12_H-768_A-12:wordpiece"
tkr = NestedTokenizer(BertCasedTokenization(wordpiece))
text1 = "Peter Piper picked a peck of pickled peppers"
single_without_TEB = text1 |> tokenizer |> wordpiece
single_with_TEB = tkr(Sentence(text1))
# `NestedTokenizer` returns a vector of vectors
@assert single_without_TEB == map(getvalue, single_with_TEB[])
julia> single_without_TEB
11-element Vector{String}:
"Peter"
"Piper"
"picked"
"a"
"p"
"##eck"
"of"
"pick"
"##led"
"pepper"
"##s"
julia> single_with_TEB
1-element Vector{Vector{TextEncodeBase.TokenStage}}:
[Token("Peter"), Token("Piper"), Token("picked"), Token("a"), Token("p"), Token("##eck"), Token("of"), Token("pick"), Token("##led"), Token("pepper"), Token("##s")]
julia> single_without_TEB == map(getvalue, single_with_TEB[])
true
# define stage for batch of data
struct BatchSentence{A<:AbstractVector, M} <: TextEncodeBase.DocumentStage
x::A
meta::M
end
BatchSentence(x) = BatchSentence(x, nothing)
TextEncodeBase.setmeta(x::BatchSentence, meta) = BatchSentence(x.x, meta)
TextEncodeBase.setvalue(x::BatchSentence, y) = BatchSentence(y, x.meta)
# splittability and split behavior for `BatchSentence`
TextEncodeBase.splittability(::BertCasedTokenization, ::BatchSentence) = Splittable()
TextEncodeBase.splitting(::BertCasedTokenization, s::BatchSentence) = s.x
text2 = "Fuzzy Wuzzy was a bear"
texts = [text1, text2]
batch_without_TEB = map(wordpiece∘tokenizer, texts)
batch_with_TEB = tkr(BatchSentence(texts))
@assert batch_without_TEB == TextEncodeBase.nestedcall(getvalue, batch_with_TEB)
julia> batch_without_TEB
2-element Vector{Vector{String}}:
["Peter", "Piper", "picked", "a", "p", "##eck", "of", "pick", "##led", "pepper", "##s"]
["Fu", "##zzy", "Wu", "##zzy", "was", "a", "bear"]
julia> batch_with_TEB
2-element Vector{Vector{TextEncodeBase.TokenStage}}:
[Token("Peter"), Token("Piper"), Token("picked"), Token("a"), Token("p"), Token("##eck"), Token("of"), Token("pick"), Token("##led"), Token("pepper"), Token("##s")]
[Token("Fu"), Token("##zzy"), Token("Wu"), Token("##zzy"), Token("was"), Token("a"), Token("bear")]
julia> batch_without_TEB == TextEncodeBase.nestedcall(getvalue, batch_with_TEB)
true
```
Since the wordpiece break word into subword, we might want to know which word each subword belongs to:
```julia
julia> itkr = NestedTokenizer(TextEncodeBase.IndexedTokenization(BertCasedTokenization(wordpiece)));
julia> ibatch_with_TEB = itkr(BatchSentence(texts));
# subword from same word having the same `word_id`
julia> ibatch_with_TEB[1]
11-element Vector{TextEncodeBase.TokenStage}:
Token("Peter", (sentence_id = 1, word_id = 1, token_id = 1))
Token("Piper", (sentence_id = 1, word_id = 2, token_id = 2))
Token("picked", (sentence_id = 1, word_id = 3, token_id = 3))
Token("a", (sentence_id = 1, word_id = 4, token_id = 4))
Token("p", (sentence_id = 1, word_id = 5, token_id = 5))
Token("##eck", (sentence_id = 1, word_id = 5, token_id = 6))
Token("of", (sentence_id = 1, word_id = 6, token_id = 7))
Token("pick", (sentence_id = 1, word_id = 7, token_id = 8))
Token("##led", (sentence_id = 1, word_id = 7, token_id = 9))
Token("pepper", (sentence_id = 1, word_id = 8, token_id = 10))
Token("##s", (sentence_id = 1, word_id = 8, token_id = 11))
julia> ibatch_with_TEB[2]
7-element Vector{TextEncodeBase.TokenStage}:
Token("Fu", (sentence_id = 2, word_id = 1, token_id = 1))
Token("##zzy", (sentence_id = 2, word_id = 1, token_id = 2))
Token("Wu", (sentence_id = 2, word_id = 2, token_id = 3))
Token("##zzy", (sentence_id = 2, word_id = 2, token_id = 4))
Token("was", (sentence_id = 2, word_id = 3, token_id = 5))
Token("a", (sentence_id = 2, word_id = 4, token_id = 6))
Token("bear", (sentence_id = 2, word_id = 5, token_id = 7))
```
## TextEncoder
The text encoder is just a combination of vocabulary and tokenizer. We also
provide some helper functions (`with_head_tail`/`nested2batch`/...) for
transforming the tokenizer result into a `lookup`-able format.
### Example
```julia
using TextEncodeBase: nestedcall, with_head_tail, trunc_and_pad, nested2batch
# construct `Vocab` with `WordPiece`
vocab = Vocab(wordpiece.vocab, wordpiece.vocab[wordpiece.unk_idx])
# define encoder with `TextEncoder`
enc = TextEncoder(
itkr, vocab,
nested2batch ∘ trunc_and_pad(nothing, vocab.unk) ∘ with_head_tail("[CLS]", "[SEP]") ∘ nestedcall(getvalue)
)
julia> encode(enc, BatchSentence(texts))
28996x13x2 OneHotArray{28996, 3, Matrix{OneHot{0x00007144}}}:
[...]
julia> decode(enc, ans)
13×2 Matrix{String}:
"[CLS]" "[CLS]"
"Peter" "Fu"
"Piper" "##zzy"
"picked" "Wu"
"a" "##zzy"
"p" "was"
"##eck" "a"
"of" "bear"
"pick" "[SEP]"
"##led" "[UNK]"
"pepper" "[UNK]"
"##s" "[UNK]"
"[SEP]" "[UNK]"
```
# Outline
```@contents
Pages = [
"design.md",
"api.md",
]
```
| TextEncodeBase | https://github.com/chengchingwen/TextEncodeBase.jl.git |
|
[
"MIT"
] | 2.4.0 | b83470988b7e7a9dfeb79b19437d5e8299b52684 | code | 427 | using Documenter
push!(LOAD_PATH, "../../src")
using GenieFramework
makedocs(
sitename = "GenieFramework - Meta Package for Genie Ecosystem",
format = Documenter.HTML(prettyurls = false),
pages = [
"Home" => "index.md",
"GenieFramework API" => [
"GenieFramework" => "API/genieframework.md",
]
],
)
deploydocs(
repo = "github.com/GenieFramework/GenieFramework.jl.git",
)
| GenieFramework | https://github.com/GenieFramework/GenieFramework.jl.git |
|
[
"MIT"
] | 2.4.0 | b83470988b7e7a9dfeb79b19437d5e8299b52684 | code | 3780 | module GenieFramework
using Revise
using Reexport
@reexport using Genie
@reexport using Stipple
@reexport using StippleUI
@reexport using StipplePlotly
@reexport using StippleTable
@reexport using StippleTabs
@reexport using Stipple.Pages
@reexport using Stipple.ReactiveTools
@reexport using StipplePlotly.Charts
@reexport using StipplePlotly.Layouts
@reexport using Genie.Renderer.Html
@reexport using Genie.Server
const DEFAULT_LAYOUT = Stipple.ReactiveTools.DEFAULT_LAYOUT
export DEFAULT_LAYOUT
export @genietools
if Genie.Configuration.isdev()
@reexport using GenieDevTools
@reexport using GenieAutoReload
@reexport using GarishPrint
@reexport using GeniePackageManager
end
# Address conflicts - this is ugly but necessary
# TODO: Refactor layout exports in next breaking release (v1)
# Both Stipple and StippleUI export layout
const q__layout = StippleUI.Layouts.layout
export q__layout
"""
This macro configures static assets (js, icons, fonts, etc.) based on production or development mode.
In production mode, it uses the CDN to load the assets.
In development mode, it loads the assets from the local file system.
It also registers routes from GenieDevTools and GeniePackageManager on a per-app basis, which makes the routes from
GenieDevTools and GeniePackageManager available in your Genie/GenieBuilder app for development purposes.
Some example routes are:
- `/geniepackagemanager`
- `/_devtools_/save`
- `/_devtools_/up`
- `/_devtools_/down`
- `/_devtools_/log`
- `/_devtools_/startrepl` etc.
which can be accessed from `app_host:app_port/geniepackagemanager` etc.
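
A minimal usage sketch (the `App` module name is arbitrary; it mirrors the README example):

```julia
module App
using GenieFramework
@genietools

# ... reactive code, pages, and routes ...
end
```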
"""
macro genietools()
return quote
function __genietools()
Genie.config.log_to_file = true
Genie.config.log_requests = false
Genie.Logger.initialize_logging()
if haskey(ENV, "BASEPATH") && ! isempty(ENV["BASEPATH"])
try
Genie.Assets.assets_config!([Genie, Stipple, StippleUI, StipplePlotly, GenieAutoReload, StippleTable, StippleTabs], host = ENV["BASEPATH"])
Genie.config.websockets_base_path = ENV["BASEPATH"]
Genie.config.websockets_exposed_port = nothing
catch ex
@error ex
end
end
if Genie.Configuration.isprod() && Genie.config.cdn_enabled
try
Genie.Assets.assets_config!([Genie, Stipple, StippleUI, StipplePlotly], host = Genie.config.cdn_url)
catch ex
@error ex
end
end
if Genie.Configuration.isdev()
GenieDevTools.register_routes()
GeniePackageManager.register_routes()
Stipple.deps!(GenieAutoReload, GenieAutoReload.deps)
@async autoreload(pwd()) |> errormonitor
if ! haskey(ENV, "GENIE_PUSH_ERRORS") || ENV["GENIE_PUSH_ERRORS"] !== "false"
@async begin
GenieDevTools.tailapplog(Genie.config.path_log; env = lowercase(ENV["GENIE_ENV"])) do line
msg = GenieDevTools.parselog(line)
msg !== nothing || return
try
msg = """$(Genie.config.webchannels_eval_command) window.GENIEMODEL.\$q.notify({timeout: 0, message: `$(line)`, color: "red", closeBtn: true})"""
Stipple.WEB_TRANSPORT[].broadcast(Genie.WebChannels.tagbase64encode(msg))
catch ex
@error ex
end
end
end |> errormonitor
end
end
nothing
end
if ! isdefined($__module__, :GENIE_TOOLS_LOADED)
const GENIE_TOOLS_LOADED = true
@debug "Loading GenieTools"
Genie.Loader.bootstrap(@__MODULE__; show_banner = false)
Stipple.__init__()
StippleUI.__init__()
StipplePlotly.__init__()
__genietools()
else
@warn "GenieTools already loaded, skipping"
end
end |> esc
end
end
| GenieFramework | https://github.com/GenieFramework/GenieFramework.jl.git |
|
[
"MIT"
] | 2.4.0 | b83470988b7e7a9dfeb79b19437d5e8299b52684 | code | 101 | using GenieFramework
using Test
@testset "GenieFramework.jl" begin
# Write your tests here.
end
| GenieFramework | https://github.com/GenieFramework/GenieFramework.jl.git |
|
[
"MIT"
] | 2.4.0 | b83470988b7e7a9dfeb79b19437d5e8299b52684 | docs | 1196 | # GenieFramework
[](https://www.genieframework.com/docs/)
Meta package for Genie reactive apps. This packages exports
`Genie`, `Stipple`, `StippleUI`, `StipplePlotly`, `Stipple.Pages`, `Stipple.ModelStorage.Sessions`, `Stipple.ReactiveTools`, `Genie.Renderer.Html`, `Genie.Server` and other packages from Genie Ecosystem as required in future
## Installation
To install the most recent released version of package:
```
pkg> add GenieFramework
```
## Usage
## Basic application
Create a simple `app.jl` script
```julia
module App
using GenieFramework
@genietools
d₁ = PlotData(x=[1, 2, 3], y=[4, 1, 2], plot=StipplePlotly.Charts.PLOT_TYPE_BAR, name="Barcelona")
d₂ = PlotData(x=[1, 2, 3], y=[2, 4, 5], plot=StipplePlotly.Charts.PLOT_TYPE_BAR, name="London")
@app begin
@out data = [d₁, d₂]
@out layout = PlotLayout()
end
function ui()
[
h1("GenieFramework 🧞 Data Vizualization 📊")
plot(:data, layout=:layout)
]
end
@page("/", ui)
end
```
```julia
julia> using GenieFramework; Genie.loadapp(); Server.isrunning() || up(async=false);
```
should start the app at `localhost:8000`
| GenieFramework | https://github.com/GenieFramework/GenieFramework.jl.git |
|
[
"MIT"
] | 2.4.0 | b83470988b7e7a9dfeb79b19437d5e8299b52684 | docs | 50 | # GenieFramework
Meta package for Genie Ecosystem | GenieFramework | https://github.com/GenieFramework/GenieFramework.jl.git |
|
[
"MIT"
] | 2.4.0 | b83470988b7e7a9dfeb79b19437d5e8299b52684 | docs | 88 | ```@meta
CurrentModule = GenieFramework
```
```@autodocs
Modules = [GenieFramework]
``` | GenieFramework | https://github.com/GenieFramework/GenieFramework.jl.git |
|
[
"MIT"
] | 0.1.0 | 5c9e72c191fadb9ac84137a4a02806a73e63d2c7 | code | 1229 | using ElasticSurfaceEmbedding
using Documenter
using DemoCards
# Create demo with DemoCards.jl
gallery_demopage, gallery_cb, gallery_assets = makedemos(joinpath("gallery"))
# Standard Documenter.jl process
DocMeta.setdocmeta!(ElasticSurfaceEmbedding, :DocTestSetup, :(using ElasticSurfaceEmbedding); recursive = true)
makedocs(;
modules = [ElasticSurfaceEmbedding],
authors = "hyrodium <[email protected]> and contributors",
repo = "https://github.com/hyrodium/ElasticSurfaceEmbedding.jl/blob/{commit}{path}#{line}",
sitename = "ElasticSurfaceEmbedding.jl",
format = Documenter.HTML(;
prettyurls = true,
canonical = "https://hyrodium.github.io/ElasticSurfaceEmbedding.jl",
assets = ["assets/custom.css", gallery_assets],
repolink = "https://github.com/hyrodium/ElasticSurfaceEmbedding.jl"
),
pages = [
"Home" => "index.md",
"Craft" => "craft.md",
"Numerical computation" => "run-julia.md",
"Symbolic computation" => "run-wolfram.md",
"Gallery" => gallery_demopage,
"API" => "api.md",
],
)
# Postprocess for demos
gallery_cb()
# Deploy docs
deploydocs(; repo = "github.com/hyrodium/ElasticSurfaceEmbedding.jl")
| ElasticSurfaceEmbedding | https://github.com/hyrodium/ElasticSurfaceEmbedding.jl.git |
|
[
"MIT"
] | 0.1.0 | 5c9e72c191fadb9ac84137a4a02806a73e63d2c7 | code | 2603 | # ---
# title: Helicatenoid
# cover: ../assets/helicatenoid.jpg
# description: Weaving a transformable curved surface from catenoid to helicoid.
# ---
# Weaving a transformable curved surface from catenoid to helicoid.
# ```@raw html
# <div class="videoWrapper">
# <!-- Copy & Pasted from YouTube -->
# <iframe width="560" height="315" src="https://www.youtube.com/embed/Gp6XkPLCw7s" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen></iframe>
# </div>
# ```
# ## Load packages
using IntervalSets
using BasicBSpline
using StaticArrays
using ElasticSurfaceEmbedding
# ## Define the shape of the surface (non-periodic direction)
ElasticSurfaceEmbedding.𝒑₍₀₎(u¹,u²) = SVector(cos(u²)*cosh(u¹),sin(u²)*cosh(u¹),u¹)
n = 9
Da(n) = (-π/2..π/2,-π/(4n)..π/(4n))
# ## Compute the shape of the embeddings
show_strain(Da(n))
steptree = initial_state(Da(n))
newton_onestep!(steptree, fixingmethod=:fix3points)
newton_onestep!(steptree)
newton_onestep!(steptree)
newton_onestep!(steptree)
newton_onestep!(steptree)
refinement!(steptree, p₊=(0,1), k₊=suggest_knotvector(steptree))
newton_onestep!(steptree)
newton_onestep!(steptree)
pin!(steptree)
# ## Export the shape in SVG format
export_pinned_steps("helicatenoid-a", steptree, unitlength=(40,"mm"), mesh=(18,1))
# 
# ## Define the shape of the surface (periodic direction)
ElasticSurfaceEmbedding.𝒑₍₀₎(u¹,u²) = SVector(cos(u¹)*cosh(u²),sin(u¹)*cosh(u²),u²)
Db(i,n) = (-π..π,(i-1)*π/(2n)..(i)*π/(2n))
## Check the maximum strain
for i in 1:n
show_strain(Db(i,n))
end
## Numerical computing
steptree = StepTree()
for i in 1:n
initial_state!(steptree, Db(i,n))
newton_onestep!(steptree, fixingmethod=:fix3points)
newton_onestep!(steptree)
newton_onestep!(steptree)
newton_onestep!(steptree)
newton_onestep!(steptree)
refinement!(steptree, p₊=(0,1), k₊=suggest_knotvector(steptree))
newton_onestep!(steptree)
newton_onestep!(steptree)
pin!(steptree)
end
# ## Export the shapes in SVG format
export_pinned_steps("helicatenoid-b", steptree, unitlength=(40,"mm"), mesh=(36,1))
#   
#   
#   
| ElasticSurfaceEmbedding | https://github.com/hyrodium/ElasticSurfaceEmbedding.jl.git |
|
[
"MIT"
] | 0.1.0 | 5c9e72c191fadb9ac84137a4a02806a73e63d2c7 | code | 3495 | # ---
# title: Helicatenoid2
# description: Weaving a transformable curved surface from catenoid to helicoid (2).
# ---
# Weaving a transformable curved surface from catenoid to helicoid.
# ## Load packages
using Luxor
using IntervalSets
using BasicBSpline
using BasicBSplineFitting
using StaticArrays
using ElasticSurfaceEmbedding
using LinearAlgebra
# ## Define the shape of the surface
const N = 8
const J = 1
f0(s) = max(-abs(s+1/2N-1)-(1/2N-1), 0)
f1(s) = -1/2+f0(mod(s-J/N, 2))
f2(s) = 1/2-f0(mod(s-1-J/N, 2))
# 0≤u≤2π, -π/2≤v≤π/2
# 0≤s≤2, 0≤t≤1
u(s,t) = π*s
v(s,t) = π*(f1(s)*(1-t) + t*f2(s))
catenoid(u,v) = SVector(cos(u)*cosh(v),sin(u)*cosh(v),v)
ElasticSurfaceEmbedding.𝒑₍₀₎(s,t) = catenoid(u(s,t), v(s,t))
# ## Compute the shape of the embeddings
splitat = [-1/N, -1/2N, 0, 1/2N, 1/N, 1, 1+1/2N, 1+1/N]
steptree = StepTree()
for shift in [0, -1/N, -2/N, -3/N]
initial_state!(steptree, (0+shift..2+shift, 0..1), splitat)
newton_onestep!(steptree, fixingmethod=:fix5points)
newton_onestep!(steptree, fixingmethod=:fix3points)
newton_onestep!(steptree)
refinement!(steptree, p₊=(0,1), k₊=ElasticSurfaceEmbedding.suggest_knotvector(steptree))
for _ in 1:5 newton_onestep!(steptree) end
pin!(steptree)
end
# ## Helper functions to export svg images
function create_bezierpath(C::BSplineManifold{1,(3,),Point})
P = bsplinespaces(C)[1]
k = knotvector(P)
k′ = 3*unique(k) + k[[1,end]]
P′ = BSplineSpace{3}(k′)
C′ = refinement(C,P′)
a′ = controlpoints(C′)
n′ = dim(P′)
m = (n′-1) ÷ 3
bezierpath = BezierPath([BezierPathSegment(a′[3i-2], a′[3i-1], a′[3i], a′[3i+1]) for i in 1:m])
return bezierpath
end
function svector2point(M::BSplineManifold)
P = bsplinespaces(M)
a = controlpoints(M)
a′ = [Point(p[1], -p[2])*100/π for p in a]
M′ = BSplineManifold(a′, P)
return M′
end
# ## Settings for export
xlims=(-2,2)
ylims=(-2,2)
unitlength = (100, "mm")
width = (xlims[2] - xlims[1]) * unitlength[1]
height = (ylims[2] - ylims[1]) * unitlength[1]
# ## Export embeddings
mkpath("helicatenoid2")
for i in 1:(N+1)÷2
filepath = joinpath("helicatenoid2", "embedding-$(i).svg")
M = svector2point(steptree.steps[10i].manifold)
D¹ = domain(bsplinespaces(M)[1])
D² = domain(bsplinespaces(M)[2])
u²₋ = minimum(D²)
u²₊ = maximum(D²)
Drawing(width, height, filepath)
origin()
background("white")
sethue("red")
C = M(:,u²₋)
path = create_bezierpath(C)
drawbezierpath(path, :stroke)
C = M(:,u²₊)
path = create_bezierpath(C)
drawbezierpath(path, :stroke)
p1 = controlpoints(M)[begin,begin]
p2 = controlpoints(M)[begin,end]
p3 = controlpoints(M)[end,begin]
p4 = controlpoints(M)[end,end]
v12 = p1-p2
q1 = p1 - Point(v12[2],-v12[1])/norm(v12) * 6
q2 = p2 - Point(v12[2],-v12[1])/norm(v12) * 6
line(p1,q1)
line(q2)
line(p2)
strokepath()
v34 = p3-p4
q3 = p3 + Point(v34[2],-v34[1])/norm(v34) * 6
q4 = p4 + Point(v34[2],-v34[1])/norm(v34) * 6
line(p3,q3)
line(q4)
line(p4)
strokepath()
finish()
preview()
script = read(filepath, String)
lines = split(script, "\n")
lines[2] = replace(lines[2],"pt\""=>"mm\"")
write(filepath, join(lines,"\n"))
end
# The output files will be saved as `embedding-$(i).svg`.
# 
# 
# 
# 
| ElasticSurfaceEmbedding | https://github.com/hyrodium/ElasticSurfaceEmbedding.jl.git |
|
[
"MIT"
] | 0.1.0 | 5c9e72c191fadb9ac84137a4a02806a73e63d2c7 | code | 1640 | # ---
# title: Hyperbolic paraboloid
# cover: ../assets/hyperbolic_paraboloid.jpg
# description: Weaving a simple curved surface with negative curvature.
# ---
# Weaving a simple curved surface with negative curvature.
# 
# ## Load packages
using IntervalSets
using BasicBSpline
using StaticArrays
using ElasticSurfaceEmbedding
# ## Define the shape of the surface
ElasticSurfaceEmbedding.𝒑₍₀₎(u¹,u²) = SVector(u¹, u², u¹^2-u²^2)
n = 10
D(i,n) = (-1.0..1.0, (i-1)/n..i/n)
# ## Compute the shape of the embeddings
steptree = ElasticSurfaceEmbedding.StepTree()
for i in 1:10
initial_state!(steptree, D(i,n))
newton_onestep!(steptree, fixingmethod=:fix3points)
newton_onestep!(steptree)
newton_onestep!(steptree)
newton_onestep!(steptree)
newton_onestep!(steptree)
refinement!(steptree, p₊=(0,1), k₊=suggest_knotvector(steptree))
newton_onestep!(steptree)
newton_onestep!(steptree)
pin!(steptree)
end
# ## Export the shapes in SVG format
export_pinned_steps("hyperbolic_paraboloid", steptree, xlims=(-2,2), ylims=(-2,2), unitlength=(100,"mm"), mesh=(20,1))
# 
# 
# 
# 
# 
# 
# 
# 
# 
# 
| ElasticSurfaceEmbedding | https://github.com/hyrodium/ElasticSurfaceEmbedding.jl.git |
|
[
"MIT"
] | 0.1.0 | 5c9e72c191fadb9ac84137a4a02806a73e63d2c7 | code | 1486 | # ---
# title: Paraboloid
# cover: ../assets/paraboloid.jpg
# description: Weaving a simple curved surface with positive curvature.
# ---
# Weaving a simple curved surface with positive curvature.
# 
# ## Load packages
using IntervalSets
using BasicBSpline
using StaticArrays
using ElasticSurfaceEmbedding
# ## Define the shape of the surface
ElasticSurfaceEmbedding.𝒑₍₀₎(u¹,u²) = SVector(u¹, u², u¹^2+u²^2)
n = 10
D(i,n) = (-1.0..1.0, (i-1)/n..i/n)
# ## Compute the shape of the embeddings
steptree = ElasticSurfaceEmbedding.StepTree()
for i in 1:10
initial_state!(steptree, D(i,n))
newton_onestep!(steptree, fixingmethod=:fix3points)
newton_onestep!(steptree)
newton_onestep!(steptree)
newton_onestep!(steptree)
newton_onestep!(steptree)
refinement!(steptree, p₊=(0,1), k₊=suggest_knotvector(steptree))
newton_onestep!(steptree)
newton_onestep!(steptree)
pin!(steptree)
end
# ## Export the shapes in SVG format
export_pinned_steps("paraboloid", steptree, xlims=(-2,2), ylims=(-2,2), unitlength=(100,"mm"), mesh=(20,1))
# 
# 
# 
# 
# 
# 
# 
# 
# 
# 
| ElasticSurfaceEmbedding | https://github.com/hyrodium/ElasticSurfaceEmbedding.jl.git |
|
[
"MIT"
] | 0.1.0 | 5c9e72c191fadb9ac84137a4a02806a73e63d2c7 | code | 4014 | # ---
# title: Stereographic projection
# cover: ../assets/stereographicprojection.jpg
# description: A point light illuminates the grid points on the ground.
# ---
# A point light illuminates the grid points on the ground.
# 
# # Load packages
using Luxor
using IntervalSets
using BasicBSpline
using BasicBSplineFitting
using StaticArrays
using ElasticSurfaceEmbedding
# # Compute the embedding shapes
# ## Shape definition
ElasticSurfaceEmbedding.𝒑₍₀₎(u¹,u²) = SVector(2*u¹/(1+u¹^2+u²^2), 2*u²/(1+u¹^2+u²^2), (-1+u¹^2+u²^2)/(1+u¹^2+u²^2))
n = 10
D(i,n) = (-2.0..2.0, 2(i-1)/n..2i/n)
# ## Strain estimation
show_strain(D(1,n))
# ## Main computation
steptree = StepTree()
for i in 1:10
initial_state!(steptree, D(i,n))
newton_onestep!(steptree, fixingmethod=:fix3points)
newton_onestep!(steptree)
refinement!(steptree, p₊=(0,1), k₊=suggest_knotvector(steptree))
newton_onestep!(steptree)
newton_onestep!(steptree)
pin!(steptree)
end
# ## Helper functions to export svg images
function create_bezierpath(C::BSplineManifold{1,(3,),Point})
P = bsplinespaces(C)[1]
k = knotvector(P)
k′ = 3*unique(k) + k[[1,end]]
P′ = BSplineSpace{3}(k′)
C′ = refinement(C,P′)
a′ = controlpoints(C′)
n′ = dim(P′)
m = (n′-1) ÷ 3
bezierpath = BezierPath([BezierPathSegment(a′[3i-2], a′[3i-1], a′[3i], a′[3i+1]) for i in 1:m])
return bezierpath
end
function svector2point(M::BSplineManifold, unitlength)
P = bsplinespaces(M)
a = controlpoints(M)
a′ = [Point(p[1]*unitlength[1], -p[2]*unitlength[1]) for p in a]
M′ = BSplineManifold(a′, P)
return M′
end
# ## Settings for export
xlims=(-3,3)
ylims=(-1,1)
unitlength = (200, "mm")
r = 0.025
# ## Export all embedded shapes with arcs
mkpath("stereographicprojection")
for i in 1:10
M = svector2point(steptree.steps[6i].manifold, unitlength)
D¹ = domain(bsplinespaces(M)[1])
D² = domain(bsplinespaces(M)[2])
u¹s = range(extrema(D¹)...,21)[2:end-1]
u²₋ = minimum(D²)
u²₊ = maximum(D²)
width = (xlims[2] - xlims[1]) * unitlength[1]
height = (ylims[2] - ylims[1]) * unitlength[1]
filepath = joinpath("stereographicprojection", "embedding-$(i).svg")
Drawing(width, height, filepath)
origin()
background("white")
sethue("red")
C = M(:,u²₋)
path = create_bezierpath(C)
drawbezierpath(path, :stroke)
C = M(:,u²₊)
path = create_bezierpath(C)
drawbezierpath(path, :stroke)
C = M(2,:)
path = create_bezierpath(C)
drawbezierpath(path, :stroke)
C = M(-2,:)
path = create_bezierpath(C)
drawbezierpath(path, :stroke)
for u¹ in u¹s
k = KnotVector([0,0,0,0,0.25,0.5,0.75,1,1,1,1])
P = BSplineSpace{3}(k)
dim(P)
a = fittingcontrolpoints(t -> M(u¹+r*cospi(t), u²₋+r*sinpi(t)), P)
C = BSplineManifold(a,P)
path = create_bezierpath(C)
drawbezierpath(path, :stroke)
a = fittingcontrolpoints(t -> M(u¹+r*cospi(t), u²₊-r*sinpi(t)), P)
C = BSplineManifold(a,P)
path = create_bezierpath(C)
drawbezierpath(path, :stroke)
end
finish()
preview()
script = read(filepath, String)
lines = split(script, "\n")
lines[2] = replace(lines[2],"pt\""=>"mm\"")
write(filepath, join(lines,"\n"))
end
# The output files will be saved as `embedding-$(i).svg`.
# By modifying these files, we can place all of the shapes in yatsugiri-size (八ツ切, approximately 270×390 mm) paper like this:
# 
# Cutting and weaving these shape will result the sphere in the top image.
# Please check the following references for more information.
# # References
# * [紙工作で立体射影をつくった話](https://note.com/hyrodium/n/n7b7cf03a7d91)
# * [立体射影製作キット](https://hackmd.io/@hyrodium/HJsIPNKqo)
# * [Stereographic projection weaving kit](https://hackmd.io/@hyrodium/H1epn1rRj)
# * [Further adventures in stereographic projection](https://www.youtube.com/watch?v=lbUOScpu0ws)
| ElasticSurfaceEmbedding | https://github.com/hyrodium/ElasticSurfaceEmbedding.jl.git |
|
[
"MIT"
] | 0.1.0 | 5c9e72c191fadb9ac84137a4a02806a73e63d2c7 | code | 950 | module ElasticSurfaceEmbedding
using LinearAlgebra
using Printf
using Dates
import Statistics.mean
using ImageShow
using IntervalSets
using StaticArrays
import FileIO.load
import FileIO.save
using OffsetArrays
using ForwardDiff
using FastGaussQuadrature
using Colors
using Luxor
import ColorBlendModes
using BasicBSpline
using BasicBSplineFitting
using BasicBSplineExporter
# Tree structure
export StepTree
# Numerical computing
export initial_state, initial_state!
export newton_onestep!
export refinement!
# Pin
export pin!, unpin!
# Exports
export export_all_steps, export_pinned_steps
# utilities
export show_strain, show_knotvector, suggest_knotvector
# auto
export auto_allsteps, auto_allsteps!
include("_constants.jl")
include("_graphics.jl")
include("_bspline.jl")
include("_geometry.jl")
include("_elasticity.jl")
include("_io.jl")
include("_initialstates.jl")
include("_newton.jl")
include("_pin.jl")
include("_auto.jl")
end # module
| ElasticSurfaceEmbedding | https://github.com/hyrodium/ElasticSurfaceEmbedding.jl.git |
|
[
"MIT"
] | 0.1.0 | 5c9e72c191fadb9ac84137a4a02806a73e63d2c7 | code | 770 | function auto_allsteps(D::Tuple{ClosedInterval{<:Real}, ClosedInterval{<:Real}})
auto_allsteps!(StepTree(), D)
end
function auto_allsteps(Ds::Vector{<:Tuple{ClosedInterval{<:Real}, ClosedInterval{<:Real}}})
steptree = StepTree()
for D in Ds
auto_allsteps!(steptree, D)
end
return steptree
end
function auto_allsteps!(steptree::StepTree, D::Tuple{ClosedInterval{<:Real}, ClosedInterval{<:Real}})
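# Fixed pipeline: initial state → 2 Newton steps → knot refinement → 3 Newton steps → pin the result.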
steptree = initial_state!(steptree, D)
newton_onestep!(steptree, fixingmethod=:fix3points)
newton_onestep!(steptree)
refinement!(steptree, p₊=(0,1), k₊=suggest_knotvector(steptree))
newton_onestep!(steptree)
newton_onestep!(steptree)
newton_onestep!(steptree)
pin!(steptree)
return steptree
end
| ElasticSurfaceEmbedding | https://github.com/hyrodium/ElasticSurfaceEmbedding.jl.git |
|
[
"MIT"
] | 0.1.0 | 5c9e72c191fadb9ac84137a4a02806a73e63d2c7 | code | 5411 | function _arrayofvector2array(a::AbstractArray{SVector{2,Float64},2})
n1, n2 = size(a)
a_2dim = [a[i1, i2][j] for i1 in 1:n1, i2 in 1:n2, j in 1:2]
return a_2dim
end
function _array2arrayofvector(a::Array{<:Real,3})
n1, n2, _ = size(a)
a_vec = [SVector{2}(a[i1, i2, :]) for i1 in 1:n1, i2 in 1:n2]
return a_vec
end
"""
Affine transform of control points.
"""
function _affine(𝒂::Matrix{<:SVector}, A::SMatrix{2,2}, b::SVector{2})
# x'=Ax+b
n₁, n₂ = size(𝒂)
return [(A * 𝒂[I₁, I₂] + b) for I₁ in 1:n₁, I₂ in 1:n₂]
end
function _rotate(𝒂::Matrix{<:SVector})
n₁, n₂ = size(𝒂)
ind0 = [(n₁ + 1) ÷ 2, (n₂ + 1) ÷ 2]
ind1 = ind0 - [0, 1]
v = 𝒂[ind1...] - 𝒂[ind0...]
R = -(@SMatrix [v[2] -v[1]; v[1] v[2]]) / norm(v)
return _affine(𝒂, R, SVector(0.0, 0.0))
end
function _center(𝒂::Matrix{<:SVector})
xs = [p[1] for p in 𝒂]
ys = [p[2] for p in 𝒂]
x_min = minimum(xs)
x_max = maximum(xs)
y_min = minimum(ys)
y_max = maximum(ys)
x = (x_min + x_max) / 2
y = (y_min + y_max) / 2
return _affine(𝒂, one(SMatrix{2,2}), -SVector(x, y))
end
function _positioning(𝒂::Matrix{<:SVector})
return _center(_rotate(𝒂))
end
function _positioning(M::BSplineManifold{2})
Ps = bsplinespaces(M)
𝒂 = controlpoints(M)
𝒂′ = _positioning(𝒂)
return BSplineManifold(𝒂′, Ps)
end
"""
refinement!(steptree, parent::Int=0; p₊::Tuple{Int,Int}=(0, 0), k₊::Tuple{AbstractKnotVector,AbstractKnotVector}=(EmptyKnotVector(),EmptyKnotVector()))
Compute a refinement of the B-spline manifold with additional degrees `p₊` and additional knots `k₊`.
"""
function refinement!(steptree, parent::Int = 0; p₊ = (0, 0), k₊ = (EmptyKnotVector(), EmptyKnotVector()))
parent = _validindex(steptree, parent)
M = loadM(steptree, index = parent)
P₁, P₂ = bsplinespaces(M)
k₁, k₂ = knotvector(P₁), knotvector(P₂)
p₊₁, p₊₂ = p₊
k₊₁, k₊₂ = k₊
if !iszero(k₊₁) && !(k₁[1] < k₊₁[1] && k₊₁[end] < k₁[end])
error("given additional knots for refinement are out of range")
end
if !iszero(k₊₂) && !(k₂[1] < k₊₂[1] && k₊₂[end] < k₂[end])
error("given additional knots for refinement are out of range")
end
comment = "Refinement - p₊:$((p₊₁, p₊₂)), k₊:$((BasicBSpline._vec(k₊₁), BasicBSpline._vec(k₊₂)))"
comment = replace(comment, "Float64" => "")
M = refinement_I(M, (Val(p₊₁), Val(p₊₂)), (k₊₁, k₊₂))
info = Dict(["type" => "refinement"])
step = Step(M, comment, info)
addstep!(steptree, step, parent)
end
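"""
suggest_knotvector(steptree; index=0)
Suggest additional knots for refinement: the midpoints of the current knot spans (for the step with the given index).
"""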
function suggest_knotvector(steptree; index=0)
M = loadM(steptree, index = index)
P = bsplinespaces(M)
k₁, k₂ = knotvector.(P)
k₁′ = unique(k₁)
k₂′ = unique(k₂)
k₁₊ = KnotVector([(k₁′[i] + k₁′[i+1]) / 2 for i in 1:(length(k₁′)-1)])
k₂₊ = KnotVector([(k₂′[i] + k₂′[i+1]) / 2 for i in 1:(length(k₂′)-1)])
return k₁₊, k₂₊
end
"""
show_knotvector(steptree; index=0)
Show the current knot vectors and suggestions for knot insertions (for the step with the given index).
"""
function show_knotvector(steptree; index = 0)
M = loadM(steptree, index = index)
P = bsplinespaces(M)
k₁, k₂ = knotvector.(P)
k₁₊, k₂₊ = suggest_knotvector(steptree, index=index)
msg = """
Current knotvectors (k₁, k₂) and suggestions for knot insertions (k₁₊, k₂₊)
k₁: $(BasicBSpline._vec(k₁))
k₂: $(BasicBSpline._vec(k₂))
k₁₊: $(BasicBSpline._vec(k₁₊))
k₂₊: $(BasicBSpline._vec(k₂₊))
"""
@info msg
return
end
function integrate(C::BSplineManifold{1})
a = controlpoints(C)
P = bsplinespaces(C)[1]
p = degree(P)
k = knotvector(P)
k′ = k + k[[begin, end]]
p′ = p+1
P′ = BSplineSpace{p′}(k′)
A = [ifelse(i≤j, 0.0, (k′[p′+j+1]-k′[j+1])/(p′)) for i in 1:dim(P′), j in 1:dim(P)]
return BSplineManifold(A*a, P′)
end
function _interpolate2(ts::AbstractVector{<:Real}, fs::AbstractVector{T}, f′0::T) where T
# Quadratic open B-spline space
p = 2
k = KnotVector(ts) + KnotVector([ts[1],ts[end]]) * p
P = BSplineSpace{p}(k)
# dimensions
m = length(ts)
n = dim(P)
# Add a first-derivative constraint f′(ts[1]) = f′0 at the left endpoint to make the fitting system square.
dP = BSplineDerivativeSpace{1}(P)
d0 = [bsplinebasis(dP,j,ts[1]) for j in 1:n]
# Compute the interpolant function (1-dim B-spline manifold)
M = [bsplinebasis(P,j,ts[i]) for i in 1:m, j in 1:n]
M = vcat(d0', M)
y = vcat([f′0], fs)
return BSplineManifold(inv(M)*y, P)
end
function _merge(manifolds::Vector{<:BSplineManifold{2, p}}) where p
# Assume all B-spline manifolds have open knot vectors.
p₁, p₂ = p
k₁ = copy(knotvector(bsplinespaces(manifolds[1])[1]))
k₂ = knotvector(bsplinespaces(manifolds[1])[2])
for i in 2:length(manifolds)
pop!(k₁.vector)
k₁ += knotvector(bsplinespaces(manifolds[i])[1])[p₁+2:end]
end
P₁ = BSplineSpace{p₁}(k₁)
P₂ = BSplineSpace{p₂}(k₂)
𝒂 = controlpoints(manifolds[1])
for i in 2:length(manifolds)
_𝒂 = controlpoints(manifolds[i])
v = 𝒂[end,:]
_v = _𝒂[1,:]
Δ = v[end] - v[1]
_Δ = _v[end] - _v[1]
a = dot(Δ, _Δ)
b = cross(Δ, _Δ)
r = (@SMatrix [a b;-b a]) / norm([a,b])
_w = [r*p for p in _v]
c = sum(v)/length(v)
_c = sum(_w)/length(_w)
_𝒂 = [r*p-_c+c for p in _𝒂]
𝒂 = vcat(𝒂[1:end-1, :], (𝒂[end:end, :]+_𝒂[1:1, :])/2, _𝒂[2:end, :])
end
return BSplineManifold(𝒂, P₁, P₂)
end
| ElasticSurfaceEmbedding | https://github.com/hyrodium/ElasticSurfaceEmbedding.jl.git |
|
[
"MIT"
] | 0.1.0 | 5c9e72c191fadb9ac84137a4a02806a73e63d2c7 | code | 202 | const d = 2 # Dimension of surface
const 𝝂 = 0.25 # Poisson's Ratio
const Y = 1.0 # Young Modulus
const 𝝀 = 𝝂 * Y / ((1 + 𝝂) * (1 - (d - 1) * 𝝂)) # Lamé constant
const 𝝁 = 1 / 2(1 + 𝝂) # Lamé constant
| ElasticSurfaceEmbedding | https://github.com/hyrodium/ElasticSurfaceEmbedding.jl.git |
|
[
"MIT"
] | 0.1.0 | 5c9e72c191fadb9ac84137a4a02806a73e63d2c7 | code | 3101 | # Strain related functions
E(M, u¹, u²) = (g₍ₜ₎(M, u¹, u²) - g₍₀₎(u¹, u²)) / 2
E₁₁(M::BSplineManifold{2}, u¹, u²) = (g₍ₜ₎₁₁(M, u¹, u²) - g₍₀₎₁₁(u¹, u²)) / 2
E⁽⁰⁾₁₁(M::BSplineManifold{2}, u¹, u²) = E₁₁(M, u¹, u²) / g₍₀₎₁₁(u¹, u²)
function Ẽ⁽⁰⁾₁₁(D₂::ClosedInterval, u¹, u²)
# Breadth of the strip-like shape
b = width(D₂) / 2
# Center coordinate of u²
c = sum(extrema(D₂)) / 2
# Normalized coordinate of u²
r = (u² - c) / b
# Compute the predicted strain with the Strain Approximation Theorem
return (1 / 2) * K₍₀₎(u¹, D₂) * B̃(u¹, D₂)^2 * (r^2 - 1 / 3)
end
function Ẽ⁽⁰⁾₁₁(M::BSplineManifold{2}, u¹, u²)
_, P₂ = bsplinespaces(M)
p₂ = degree(P₂)
k₂ = knotvector(P₂)
D₂ = k₂[1+p₂] .. k₂[end-p₂]
return Ẽ⁽⁰⁾₁₁(D₂, u¹, u²)
end
function _compute_minmax_strain(M)
P = bsplinespaces(M)
D₁, D₂ = domain.(P)
mesh = (500, 50) # TODO
κ₁ = range(leftendpoint(D₁) + 1e-8, stop = rightendpoint(D₁) - 1e-8, length = mesh[1] + 1)
κ₂ = range(leftendpoint(D₂) + 1e-8, stop = rightendpoint(D₂) - 1e-8, length = mesh[2] + 1)
E = [E⁽⁰⁾₁₁(M, u₁, u₂) for u₁ in κ₁, u₂ in κ₂]
return (minimum(E), maximum(E))
end
function _predict_minmax_strain(D::Tuple{ClosedInterval{<:Real}, ClosedInterval{<:Real}})
D₁, D₂ = D
mesh = (500, 50) # TODO
κ₁ = range(leftendpoint(D₁), stop = rightendpoint(D₁), length = mesh[1] + 1)
κ₂ = range(leftendpoint(D₂), stop = rightendpoint(D₂), length = mesh[2] + 1)
E = [Ẽ⁽⁰⁾₁₁(D₂, u₁, u₂) for u₁ in κ₁, u₂ in κ₂]
return (minimum(E), maximum(E))
end
"""
show_strain(D; index=0)
Show the predicted maximum strain and, if possible, also the computed strain with the given index.
"""
function show_strain(D::Tuple{ClosedInterval{<:Real}, ClosedInterval{<:Real}}; index = 0)
minE, maxE = _predict_minmax_strain(D)
D₁, D₂ = D
msg = "Strain - domain: " * repr([endpoints(D₁)...]) * "×" * repr([endpoints(D₂)...]) * "\n"
msg *= "Predicted: (min: $(minE), max: $(maxE))\n"
# if isTheShapeComputed()
# M = loadM(index=index)
# minE, maxE = _compute_minmax_strain(M)
# msg *= "Computed: (min: $(minE), max: $(maxE))\n"
# end
@info msg
return
end
"""
show_strain(domains; index=0)
Show the predicted maximum strain and, if possible, also the computed strain with the given index.
"""
function show_strain(domains::Vector{<:Tuple{ClosedInterval{<:Real}, ClosedInterval{<:Real}}}; index = 0)
msg = ""
for domain in domains
minE, maxE = _predict_minmax_strain(domain)
D₁, D₂ = domain
msg *= "Strain - domain: " * repr([endpoints(D₁)...]) * "×" * repr([endpoints(D₂)...]) * "\n"
msg *= " Predicted: (min: $(minE), max: $(maxE))\n"
end
# if isTheShapeComputed()
# M = loadM(index=index)
# minE, maxE = _compute_minmax_strain(M)
# msg *= "Computed: (min: $(minE), max: $(maxE))\n"
# end
@info msg
return
end
# Elastic Modulus
function C(i, j, k, l, g⁻)
𝝀 * g⁻[i, j] * g⁻[k, l] + 𝝁 * (g⁻[i, k] * g⁻[j, l] + g⁻[i, l] * g⁻[j, k])
end
| ElasticSurfaceEmbedding | https://github.com/hyrodium/ElasticSurfaceEmbedding.jl.git |
|
[
"MIT"
] | 0.1.0 | 5c9e72c191fadb9ac84137a4a02806a73e63d2c7 | code | 3146 | ## Reference State
# Parametric mapping of Reference state
𝒑₍₀₎(u¹, u²) = SVector(u¹, u², 0)
# alias to avoid non-standard unicode characters
const surface = 𝒑₍₀₎
# Tangent vector
𝒑₁₍₀₎(u¹, u²) = ForwardDiff.derivative(u¹ -> 𝒑₍₀₎(u¹, u²), u¹)
𝒑₂₍₀₎(u¹, u²) = ForwardDiff.derivative(u² -> 𝒑₍₀₎(u¹, u²), u²)
𝒑₁₁₍₀₎(u¹, u²) = ForwardDiff.derivative(u¹ -> 𝒑₁₍₀₎(u¹, u²), u¹)
𝒑₁₂₍₀₎(u¹, u²) = ForwardDiff.derivative(u² -> 𝒑₁₍₀₎(u¹, u²), u²)
𝒑₂₁₍₀₎(u¹, u²) = ForwardDiff.derivative(u¹ -> 𝒑₂₍₀₎(u¹, u²), u¹)
𝒑₂₂₍₀₎(u¹, u²) = ForwardDiff.derivative(u² -> 𝒑₂₍₀₎(u¹, u²), u²)
# Normal vector
𝒆₍₀₎(u¹, u²) = normalize(cross(𝒑₁₍₀₎(u¹, u²), 𝒑₂₍₀₎(u¹, u²)))
# Riemannian metric
g₍₀₎₁₁(u¹, u²) = dot(𝒑₁₍₀₎(u¹, u²), 𝒑₁₍₀₎(u¹, u²))
g₍₀₎₁₂(u¹, u²) = dot(𝒑₁₍₀₎(u¹, u²), 𝒑₂₍₀₎(u¹, u²))
g₍₀₎₂₁(u¹, u²) = dot(𝒑₂₍₀₎(u¹, u²), 𝒑₁₍₀₎(u¹, u²))
g₍₀₎₂₂(u¹, u²) = dot(𝒑₂₍₀₎(u¹, u²), 𝒑₂₍₀₎(u¹, u²))
g₍₀₎(u¹, u²) = @SMatrix [g₍₀₎₁₁(u¹, u²) g₍₀₎₁₂(u¹, u²); g₍₀₎₂₁(u¹, u²) g₍₀₎₂₂(u¹, u²)]
h₍₀₎(u¹, u²) = @SMatrix [
(𝒆₍₀₎(u¹, u²)'*𝒑₁₁₍₀₎(u¹, u²)) (𝒆₍₀₎(u¹, u²)'*𝒑₁₂₍₀₎(u¹, u²))
(𝒆₍₀₎(u¹, u²)'*𝒑₂₁₍₀₎(u¹, u²)) (𝒆₍₀₎(u¹, u²)'*𝒑₂₂₍₀₎(u¹, u²))
]
# Gaussian curvature
K₍₀₎(u¹, u²) = det(h₍₀₎(u¹, u²)) / det(g₍₀₎(u¹, u²))
# Volume form
𝝊₍₀₎(u¹, u²) = norm(cross(𝒑₁₍₀₎(u¹, u²), 𝒑₂₍₀₎(u¹, u²)))
g⁻₍₀₎(u¹, u²) = inv(g₍₀₎(u¹, u²)) # inverse of the first fundamental form
g₁₍₀₎(u¹, u²) = ForwardDiff.derivative(u¹ -> g₍₀₎(u¹, u²), u¹)
g₂₍₀₎(u¹, u²) = ForwardDiff.derivative(u² -> g₍₀₎(u¹, u²), u²)
# Christoffel symbol
𝛤₍₀₎²₁₁(u¹, u²) =
(g⁻₍₀₎(u¹, u²)[2, 1] * g₁₍₀₎(u¹, u²)[1, 1] + g⁻₍₀₎(u¹, u²)[2, 2] * (2g₁₍₀₎(u¹, u²)[2, 1] - g₂₍₀₎(u¹, u²)[1, 1])) / 2
e⁽⁰⁾₁(u¹, u²) = normalize(𝒑₁₍₀₎(u¹, u²))
e⁽⁰⁾₂(u¹, u²) = normalize(𝒑₂₍₀₎(u¹, u²) - (g₍₀₎₁₂(u¹, u²) / g₍₀₎₁₁(u¹, u²)) * 𝒑₁₍₀₎(u¹, u²))
c(D₂::ClosedInterval) = sum(extrema(D₂)) / 2 # Coordinate on the center curve
s₍₀₎(t, D₂::ClosedInterval) = sqrt(g₍₀₎₁₁(t, c(D₂)))
ṡ₍₀₎(t, D₂::ClosedInterval) = (1 / 2) * (g₁₍₀₎(t, c(D₂)))[1, 1] / sqrt(g₍₀₎₁₁(t, c(D₂)))
𝜅₍₀₎(t, D₂::ClosedInterval) = 𝛤₍₀₎²₁₁(t, c(D₂)) * 𝝊₍₀₎(t, c(D₂)) / s₍₀₎(t, D₂)^3 # Geodesic curvature
K₍₀₎(t, D₂::ClosedInterval) = K₍₀₎(t, c(D₂)) # Gaussian curvature
B̃(t, D₂::ClosedInterval) = dot(e⁽⁰⁾₂(t, c(D₂)), 𝒑₂₍₀₎(t, c(D₂))) * width(D₂) / 2 # Breadth of the piece of surface
g₍₀₎₁₁(u¹, D₂::ClosedInterval) = g₍₀₎₁₁(u¹, c(D₂))
g₍₀₎₁₂(u¹, D₂::ClosedInterval) = g₍₀₎₁₂(u¹, c(D₂))
g₍₀₎₂₁(u¹, D₂::ClosedInterval) = g₍₀₎₂₁(u¹, c(D₂))
g₍₀₎₂₂(u¹, D₂::ClosedInterval) = g₍₀₎₂₂(u¹, c(D₂))
𝝊₍₀₎(u¹, D₂::ClosedInterval) = 𝝊₍₀₎(u¹, c(D₂))
# Current State
𝒑₍ₜ₎(M, u¹, u²) = unbounded_mapping(M, u¹, u²)
# This can be faster with BSplineDerivativeSpace, but we don't need speed here.
𝒑₁₍ₜ₎(M, u¹, u²) = ForwardDiff.derivative(u¹ -> 𝒑₍ₜ₎(M, u¹, u²), u¹)
𝒑₂₍ₜ₎(M, u¹, u²) = ForwardDiff.derivative(u² -> 𝒑₍ₜ₎(M, u¹, u²), u²)
g₍ₜ₎₁₁(M, u¹, u²) = dot(𝒑₁₍ₜ₎(M, u¹, u²), 𝒑₁₍ₜ₎(M, u¹, u²)) # component of the first fundamental form
g₍ₜ₎₁₂(M, u¹, u²) = dot(𝒑₁₍ₜ₎(M, u¹, u²), 𝒑₂₍ₜ₎(M, u¹, u²)) # component of the first fundamental form
g₍ₜ₎₂₁(M, u¹, u²) = dot(𝒑₂₍ₜ₎(M, u¹, u²), 𝒑₁₍ₜ₎(M, u¹, u²)) # component of the first fundamental form
g₍ₜ₎₂₂(M, u¹, u²) = dot(𝒑₂₍ₜ₎(M, u¹, u²), 𝒑₂₍ₜ₎(M, u¹, u²)) # component of the first fundamental form
g₍ₜ₎(M, u¹, u²) = @SMatrix [g₍ₜ₎₁₁(M, u¹, u²) g₍ₜ₎₁₂(M, u¹, u²); g₍ₜ₎₂₁(M, u¹, u²) g₍ₜ₎₂₂(M, u¹, u²)]
| ElasticSurfaceEmbedding | https://github.com/hyrodium/ElasticSurfaceEmbedding.jl.git |
|
[
"MIT"
] | 0.1.0 | 5c9e72c191fadb9ac84137a4a02806a73e63d2c7 | code | 1875 | # Luxor related
function _changeunit(path_svg, units::Pair{String,String})
old_unit, new_unit = units
acceptable_units = ("px", "in", "pt", "pc", "cm", "mm")
if !(new_unit in acceptable_units)
error("The unit $(new_unit) is not supported in SVG format.")
end
script = read(path_svg, String)
lines = split(script, "\n")
lines[2] = replace(lines[2], "$(old_unit)\"" => "$(new_unit)\"")
write(path_svg, join(lines, "\n"))
end
function _colorbar(; max = 1.000, filename = "ColorBar.png", width = 100)
up = 4
down = -4
right = 6.2
left = -2
Length = 3.5
FontSize = 0.85
unit = width / (right - left)
Thickness = unit / 10
Drawing(round(width), round((up - down) * unit), filename)
Luxor.origin(-left * unit, up * unit)
setblend(Luxor.blend(Point(0, -Length * unit), Point(0, Length * unit), "red", "cyan"))
box(BasicBSplineExporter._luxor_pt([-0.9, 0], unit), 1.8 * unit, 7 * unit, :fill)
sethue("Black")
fontface("JuliaMono")
fontsize(unit * FontSize)
setline(Thickness)
setlinecap("round")
text(" " * @sprintf("%.6f", max), BasicBSplineExporter._luxor_pt([1.4, Length - 0.28 * FontSize], unit))
text(" " * @sprintf("%.6f", 0), BasicBSplineExporter._luxor_pt([1.4, -0.28 * FontSize], unit))
text("-" * @sprintf("%.6f", max), BasicBSplineExporter._luxor_pt([1.4, -Length - 0.28 * FontSize], unit))
line(BasicBSplineExporter._luxor_pt([0.5, 0], unit), BasicBSplineExporter._luxor_pt([1.2, 0], unit), :stroke)
line(
BasicBSplineExporter._luxor_pt([0.5, -Length], unit),
BasicBSplineExporter._luxor_pt([1.2, -Length], unit),
:stroke,
)
line(
BasicBSplineExporter._luxor_pt([0.5, Length], unit),
BasicBSplineExporter._luxor_pt([1.2, Length], unit),
:stroke,
)
finish()
end
| ElasticSurfaceEmbedding | https://github.com/hyrodium/ElasticSurfaceEmbedding.jl.git |
|
[
"MIT"
] | 0.1.0 | 5c9e72c191fadb9ac84137a4a02806a73e63d2c7 | code | 5067 | """
initial_state(D)
Compute the initial state by solving an ODE for the center curve.
"""
function initial_state(D::Tuple{ClosedInterval{<:Real}, ClosedInterval{<:Real}}, splitat=Float64[])
D₁, D₂ = D
M = _positioning(_initialize(D₁, D₂, splitat))
comment = "Initial state - domain: " * repr([endpoints(D₁)...]) * "×" * repr([endpoints(D₂)...])
info = Dict(["type" => "initial"])
step = Step(M, comment, info)
steptree = StepTree()
addstep!(steptree, step, 0)
end
"""
initial_state!(steptree, D)
Compute the initial state by solving an ODE for the center curve.
"""
function initial_state!(steptree, D::Tuple{ClosedInterval{<:Real}, ClosedInterval{<:Real}}, splitat=Float64[])
D₁, D₂ = D
M = _positioning(_initialize(D₁, D₂, splitat))
comment = "Initial state - domain: " * repr([endpoints(D₁)...]) * "×" * repr([endpoints(D₂)...])
info = Dict(["type" => "initial"])
step = Step(M, comment, info)
addstep!(steptree, step, 0)
end
# Coefficient matrix of the center-curve ODE
A(t, D₂) = @SMatrix [
ṡ₍₀₎(t, D₂)/s₍₀₎(t, D₂) -𝜅₍₀₎(t, D₂)*s₍₀₎(t, D₂)
𝜅₍₀₎(t, D₂)*s₍₀₎(t, D₂) ṡ₍₀₎(t, D₂)/s₍₀₎(t, D₂)
]
ω(t, D₂) = abs(s₍₀₎(t, D₂))/2B̃(t, D₂)
function _divide_D₁(D₁::ClosedInterval{<:Real}, D₂::ClosedInterval{<:Real})
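# Split D₁ into subintervals whose weighted lengths ∫ω(t)dt are approximately equal,
# estimating each integral with 10-point Gauss–Legendre quadrature and a fixed-point refinement.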
t₋ = minimum(D₁)
t₊ = maximum(D₁)
nodes, weights = gausslegendre(10)
t2 = Float64(t₋)
ts = [t2]
Ls = Float64[]
while true
t1 = t2
t2 = t1+1/ω(t1, D₂)
L12 = 0.0 # estimate of ∫ω(t)dt over [t1, t2]; declared here so it remains visible after the loop below
for _ in 1:10
nodes_shifted = t1 .+ (nodes .+ 1) ./ 2 .* (t2-t1)
L12 = dot(ω.(nodes_shifted, Ref(D₂)), weights)*(t2-t1)/2
t2 += (1-L12)/ω(t2, D₂)
end
if t2 < t₊
push!(ts, t2)
push!(Ls, L12)
elseif iseven(length(ts))
t2 = t₊
nodes_shifted = t1 .+ (nodes .+ 1) ./ 2 .* (t2-t1)
L12 = dot(ω.(nodes_shifted, Ref(D₂)), weights)*(t2-t1)/2
push!(ts, t2)
push!(Ls, L12)
break
else
t2 = (t2+t₊)/2
nodes_shifted = t1 .+ (nodes .+ 1) ./ 2 .* (t2-t1)
L12 = dot(ω.(nodes_shifted, Ref(D₂)), weights)*(t2-t1)/2
push!(ts, t2)
push!(Ls, L12)
t2 = t₊
nodes_shifted = t1 .+ (nodes .+ 1) ./ 2 .* (t2-t1)
L12 = dot(ω.(nodes_shifted, Ref(D₂)), weights)*(t2-t1)/2
push!(ts, t2)
push!(Ls, L12)
break
end
end
l = length(ts)
for _ in 1:10
L̄ = mean(Ls)
for i in 2:l-1
ΔL = sum(Ls[1:i-1]) - L̄*(i-1)
ts[i] -= ΔL / ω(ts[i], D₂)
end
for i in 1:l-1
t1 = ts[i]
t2 = ts[i+1]
nodes_shifted = t1 .+ (nodes .+ 1) ./ 2 .* (t2-t1)
L12 = dot(ω.(nodes_shifted, Ref(D₂)), weights)*(t2-t1)/2
Ls[i] = L12
end
end
return ts
end
function _initialize(D₁, D₂)
# Definitions for the center curve
# 1e-14 is an ad-hoc offset to avoid a non-smooth singularity on the boundary.
t₋ = minimum(D₁) + 1e-14
t₊ = maximum(D₁) - 1e-14
# Number of divisions for ODE
N = 100
# Initial condition for ODE
𝒄̇₀ = SVector(s₍₀₎(t₋, D₂), 0.0)
# Solve ODE 𝒄̈₍ₛ₎(t) = A(t)𝒄̇₍ₛ₎(t) with Runge-Kutta method (and interpolation)
Δt = (t₊ - t₋) / N
ts = range(t₋, stop = t₊, length = N + 1)
𝒄̇₍ₛ₎s = zeros(SVector{2,Float64}, N + 1)
𝒄̇₍ₛ₎s[1] = 𝒄̇₀
for i in 1:N
t = ts[i]
𝒄̇ = 𝒄̇₍ₛ₎s[i]
k1 = A(t, D₂) * 𝒄̇
k2 = A(t + Δt / 2, D₂) * (𝒄̇ + k1 * Δt / 2)
k3 = A(t + Δt / 2, D₂) * (𝒄̇ + k2 * Δt / 2)
k4 = A(t + Δt, D₂) * (𝒄̇ + k3 * Δt)
Δ𝒄̇₀ = Δt * (k1 + 2k2 + 2k3 + k4) / 6
𝒄̇₍ₛ₎s[i+1] = 𝒄̇ + Δ𝒄̇₀
end
𝒄̇₍ₛ₎ = _interpolate2(ts, 𝒄̇₍ₛ₎s, A(t₋, D₂)*𝒄̇₀)
# Integrate 𝒄̇₍ₛ₎ and obtain the center-curve 𝒄₍ₛ₎
𝒄₍ₛ₎(t) = unbounded_mapping(integrate(𝒄̇₍ₛ₎), t)
# Construct initial state M₍ₛ₎
𝒒₍ₛ₎₁(t) = unbounded_mapping(𝒄̇₍ₛ₎, t)
𝒒₍ₛ₎₂(t) = (@SMatrix [g₍₀₎₁₂(t, D₂) -𝝊₍₀₎(t, D₂); 𝝊₍₀₎(t, D₂) g₍₀₎₁₂(t, D₂)]) * 𝒒₍ₛ₎₁(t) / g₍₀₎₁₁(t, D₂)
p₁ = 3
p₂ = 1
k₁ = KnotVector(_divide_D₁(D₁, D₂)) + p₁ * KnotVector([extrema(D₁)...])
k₂ = KnotVector(repeat(collect(extrema(D₂)), inner = 2))
P₁ = BSplineSpace{p₁}(k₁)
P₂ = BSplineSpace{p₂}(k₂)
# Approximate 𝒄 with B-spline curve
𝒓 = fittingcontrolpoints(𝒒₍ₛ₎₂, P₁)
𝒎 = fittingcontrolpoints(𝒄₍ₛ₎, P₁)
b = width(D₂) / 2
𝒂 = hcat(𝒎 - b * 𝒓, 𝒎 + b * 𝒓)
# c = (t₋+t₊)/2
# 𝒑₍ₛ₎(u¹, u²) = 𝒄₍ₛ₎(u¹) + (u²-c)*𝒒₍ₛ₎₂(u¹)
# 𝒂 = fittingcontrolpoints(𝒑₍ₛ₎, P₁, P₂)
M = BSplineManifold(𝒂, (P₁, P₂))
M′ = refinement(M, (Val(0), Val(1)))
return M′
end
function _initialize(D₁, D₂, splitat)
_splitat = unique!(sort!(vcat([u¹ for u¹ in splitat if u¹ in OpenInterval(D₁)], extrema(D₁)...)))
intervals = [_splitat[i].._splitat[i+1] for i in 1:length(_splitat)-1]
manifolds = [_initialize(interval, D₂) for interval in intervals]
M = _merge(manifolds)
return M
end
| ElasticSurfaceEmbedding | https://github.com/hyrodium/ElasticSurfaceEmbedding.jl.git |
|
[
"MIT"
] | 0.1.0 | 5c9e72c191fadb9ac84137a4a02806a73e63d2c7 | code | 8169 | mutable struct Step{T<:BSplineManifold{2}}
manifold::T
comment::String
info::Dict
function Step(manifold::BSplineManifold{2}, comment, info)
new{typeof(manifold)}(manifold, comment, info)
end
end
struct StepTree
steps::Vector{Step}
parents::Vector{Int}
pinned::Vector{Bool}
function StepTree()
new(Vector{Step}(), Vector{Int}(), Vector{Bool}())
end
end
function addstep!(steptree::StepTree, step::Step, parent::Int)
push!(steptree.steps, step)
push!(steptree.parents, parent)
push!(steptree.pinned, false)
return steptree
end
function parent_id(steptree, id)
steptree.parents[id]
end
function nodeseries(steptree, i)
series = [i]
while i ≠ 0
i = parent_id(steptree, i)
pushfirst!(series, i)
end
return series
end
function _tree_as_string(steptree::StepTree)
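# Render the step tree as text: one line per step, with child steps indented under their parents and pinned steps marked with 📌.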
n = length(steptree.steps)
serieses = [nodeseries(steptree, i) for i in 1:n]
sort!(serieses)
lowstrings = String[]
for i in 1:n
l = length(serieses[i])
key = serieses[i][end]
step = steptree.steps[key]
pinned = steptree.pinned[key]
comment = "📌 "^pinned * step.comment
if l == 2
lowstring = "$(key): " * comment
push!(lowstrings, lowstring)
elseif l ≥ 3
lowstring = " "^(l - 3) * "└─$(key): " * comment
push!(lowstrings, lowstring)
for j in 1:(i-1)
chars = collect(lowstrings[end-j])
if chars[2(l-3)+1] == ' '
lowstrings[end-j] = join(chars[1:2(l-3)]) * "│" * join(chars[2(l-3)+2:end])
elseif chars[2(l-3)+1] == '└'
lowstrings[end-j] = join(chars[1:2(l-3)]) * "├" * join(chars[2(l-3)+2:end])
break
else
break
end
end
end
end
outstring = ""
for s in lowstrings
outstring = outstring * s * "\n"
end
return outstring
end
function Base.show(io::IO, steptree::StepTree)
print(io, _tree_as_string(steptree))
end
function _validindex(steptree, index::Int)
if index == 0
return length(steptree.steps)
else
return index
end
end
function loadM(steptree; index = 0)
if index == 0
index = length(steptree.steps)
end
M = steptree.steps[index].manifold
return M
end
function export_all_steps(
dir,
steptree::StepTree;
maximumstrain = 0,
xlims = (-2, 2),
ylims = (-2, 2),
mesh = (20, 1),
unitlength::Tuple{<:Real,<:AbstractString} = (50, "mm"),
colorbarsize = 0.3,
)
mkpath(dir)
for i in eachindex(steptree.steps)
M = steptree.steps[i].manifold
export_one_step(
dir,
M,
i,
maximumstrain = maximumstrain,
xlims = xlims,
ylims = ylims,
mesh = mesh,
unitlength = unitlength,
colorbarsize = colorbarsize,
)
end
export_pinned_steps(dir, steptree, xlims = xlims, ylims = ylims, mesh = mesh, unitlength = unitlength)
write(joinpath(dir, "log.txt"), _tree_as_string(steptree))
end
function export_one_step(
dir,
M::BSplineManifold{2},
index::Integer;
maximumstrain = 0,
xlims = nothing,
ylims = nothing,
mesh = (20, 1),
unitlength::Tuple{<:Real,<:AbstractString} = (100, "mm"),
colorbarsize = 0.3,
)
if isnothing(xlims)
xs = [p[1] for p in controlpoints(M)]
xlims = floor(Int, minimum(xs))-1, ceil(Int, maximum(xs))+1
end
if isnothing(ylims)
ys = [p[2] for p in controlpoints(M)]
ylims = floor(Int, minimum(ys))-1, ceil(Int, maximum(ys))+1
end
if maximumstrain ≤ 0
MS = _compute_minmax_strain(M)
maximumstrain = max(-MS[1], MS[2])
end
aa = 5 # magnification parameter for antialias
width = (xlims[2] - xlims[1]) * unitlength[1]
normalized_strain(u¹, u²) = E⁽⁰⁾₁₁(M, u¹, u²) / maximumstrain # bounded in -1 to 1
mkpath(joinpath(dir, "bspline"))
mkpath(joinpath(dir, "strain"))
mkpath(joinpath(dir, "colorbar"))
mkpath(joinpath(dir, "combined"))
path_svg_bspline = joinpath(dir, "bspline", "bspline-$(index).svg")
path_png_bspline = joinpath(dir, "bspline", "bspline-$(index).png")
path_png_strain = joinpath(dir, "strain", "strain-$(index).png")
path_png_colorbar = joinpath(dir, "colorbar", "colorbar-$(index).png")
path_png_combined = joinpath(dir, "combined", "combined-$(index).png")
colorfunc(u¹, u²) = normalized_strain(u¹, u²) * RGB(0.5, -0.5, -0.5) + RGB(0.5, 0.5, 0.5) # red to cyan
save_svg(path_svg_bspline, M, xlims = xlims, ylims = ylims, mesh = mesh, unitlength = Int(unitlength[1]))
save_png(path_png_bspline, M, xlims = xlims, ylims = ylims, mesh = mesh, unitlength = Int(unitlength[1]))
save_png(path_png_strain, M, colorfunc, xlims = xlims, ylims = ylims, unitlength = Int(aa * unitlength[1]))
_colorbar(max = maximumstrain, filename = path_png_colorbar, width = aa * colorbarsize * width)
_changeunit(path_svg_bspline, "pt" => unitlength[2])
img_bspline = load(path_png_bspline)
img_strain = load(path_png_strain)
img_colorbar = load(path_png_colorbar)
img_bspline = convert(Array{RGBA{Float64},2}, img_bspline)
img_strain = convert(Array{RGBA{Float64},2}, img_strain)
img_colorbar = convert(Array{RGBA{Float64},2}, img_colorbar)
size_bspline = size(img_bspline)
size_strain = size(img_strain)
size_colorbar = size(img_colorbar)
img_bspline_white_background =
ColorBlendModes.blend.(RGB(1, 1, 1), img_bspline, op = ColorBlendModes.CompositeSourceOver)
img_strain_white_background =
ColorBlendModes.blend.(RGB(1, 1, 1), img_strain, op = ColorBlendModes.CompositeSourceOver)
Δ = size_strain .- size_colorbar
img_offset_colorbar = OffsetArray(img_colorbar, Δ...)
img_strain_with_colorbar = copy(img_strain_white_background)
img_strain_with_colorbar[axes(img_offset_colorbar)...] =
ColorBlendModes.blend.(
img_strain_with_colorbar[axes(img_offset_colorbar)...],
img_offset_colorbar,
op = ColorBlendModes.CompositeSourceOver,
)
img_strain_with_colorbar =
[RGB(mean(img_strain_with_colorbar[5i-4:5i, 5j-4:5j])) for i in 1:size_bspline[1], j in 1:size_bspline[2]]
# img_strain_with_colorbar = imresize(img_strain_with_colorbar, (800,800)) # could be coded like this, but the previous one is better for anti-alias
img_combined = hcat(img_bspline_white_background, img_strain_with_colorbar)
save(path_png_combined, img_combined)
end
"""
export_pinned_steps(dir, steptree; xlims=(-2,2), ylims=(-2,2), mesh=(20,1), unitlength=(50, "mm"))
Export all pinned steps to `dir` for the final output.
"""
function export_pinned_steps(
dir::AbstractString,
steptree::StepTree;
xlims = (-2, 2),
ylims = (-2, 2),
mesh = (20, 1),
unitlength::Tuple{<:Real,<:AbstractString} = (50, "mm"),
# cutout=(0.1, 5),
)
dir_pinned = joinpath(dir, "pinned")
# Delete current pinned directory
rm(dir_pinned, recursive = true, force = true)
# Make path to pinned directory
mkpath(dir_pinned)
pinned_steps = findall(steptree.pinned)
paths_output = Vector{String}(undef, length(pinned_steps))
for (i, index) in enumerate(pinned_steps)
M = loadM(steptree, index = index)
path_svg = joinpath(dir_pinned, "pinned-$(index).svg")
save_svg(path_svg, M, xlims = xlims, ylims = ylims, mesh = mesh, unitlength = unitlength[1], points = false)
_changeunit(path_svg, "pt" => unitlength[2])
paths_output[i] = path_svg
end
return paths_output
end
function Base.show(io::IO, mime::MIME"image/png", steptree::StepTree)
dir = mktempdir()
index = length(steptree.steps)
M = steptree.steps[index].manifold
export_one_step(dir, M, index)
img = RGB.(load(joinpath(dir, "combined", "combined-$(index).png")))
Base.show(io, mime, img)
end
Base.showable(::MIME"image/png", steptree::StepTree) = !iszero(length(steptree.steps))
| ElasticSurfaceEmbedding | https://github.com/hyrodium/ElasticSurfaceEmbedding.jl.git |
|
[
"MIT"
] | 0.1.0 | 5c9e72c191fadb9ac84137a4a02806a73e63d2c7 | code | 11493 | function _defaultorientation(n₁, n₂)
return ([(n₁ + 1) ÷ 2, (n₂ + 1) ÷ 2, 1], [(n₁ + 1) ÷ 2, (n₂ + 1) ÷ 2, 2], [(n₁ + 1) ÷ 2, (n₂ + 1) ÷ 2 - 1, 1])
end
function _fixthreepoints(n₁, n₂)
return (
[1, (n₂ + 1) ÷ 2, 1],
[1, (n₂ + 1) ÷ 2, 2],
[(n₁ + 1) ÷ 2, (n₂ + 1) ÷ 2, 1],
[(n₁ + 1) ÷ 2, (n₂ + 1) ÷ 2, 2],
[n₁, (n₂ + 1) ÷ 2, 1],
[n₁, (n₂ + 1) ÷ 2, 2],
)
end
function _fixfivepoints(n₁, n₂)
mid1 = (n₁ + 1) ÷ 2
mid2 = (n₂ + 1) ÷ 2
Δ4 = (n₁ + 1) ÷ 4
return (
[1, mid2, 1],
[1, mid2, 2],
[mid1-Δ4, mid2, 1],
[mid1-Δ4, mid2, 2],
[mid1, mid2, 1],
[mid1, mid2, 2],
[mid1+Δ4, mid2, 1],
[mid1+Δ4, mid2, 2],
[n₁, mid2, 1],
[n₁, mid2, 2],
)
end
_abbstr(t::Week) = string(t.value) * "w "
_abbstr(t::Day) = string(t.value) * "d "
_abbstr(t::Hour) = string(t.value) * "h "
_abbstr(t::Minute) = string(t.value) * "m "
_abbstr(t::Second) = string(t.value) * "s "
_abbstr(t::Millisecond) = string(t.value) * "ms "
_abbstr(t::Vector{Period}) = *(_abbstr.(t)...)[1:end-1]
function _seconds2string(Δt::Float64)
periods = Dates.canonicalize(Dates.CompoundPeriod(Dates.Millisecond(floor(1000Δt)))).periods
if isempty(periods)
return "0ms"
else
return _abbstr(periods)
end
end
"""
newton_onestep!(steptree, parent::Int=0; fixingmethod=:default)
Compute one step of the Newton-Raphson method.
"""
function newton_onestep!(steptree, parent::Int = 0; fixingmethod = :default)
if fixingmethod == :default
fixed = _defaultorientation
elseif fixingmethod == :fix3points
fixed = _fixthreepoints
elseif fixingmethod == :fix5points
fixed = _fixfivepoints
else
error("No method for $(fixingmethod). Use :default or :fix3points.")
end
parent = _validindex(steptree, parent)
M = loadM(steptree, index = parent)
n₁, n₂ = dim.(bsplinespaces(M))
iseven(n₁) && error("n₁ should be odd numbers")
iseven(n₂) && error("n₂ should be odd numbers")
M = _positioning(M)
M, F, Ǧ, Δt = _newton(M, fixed)
comment =
"Newton onestep - residual norm: " *
(@sprintf("%.4e", norm(F))) *
", Δa norm: " *
(@sprintf("%.4e", norm(Ǧ))) *
", computation time: " *
_seconds2string(Δt)
info = Dict(["type" => "newton", "fixingmethod" => string(fixingmethod)])
step = Step(M, comment, info)
addstep!(steptree, step, parent)
end
function _newton(M::BSplineManifold{2,p,<:SVector}, fix_method) where {p}
𝒂 = _arrayofvector2array(controlpoints(M))
P = bsplinespaces(M)
n₁, n₂ = dim.(P)
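# Map the triple (I₁, I₂, coordinate index i) of a control-point component to a single linear index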
lineup(I₁, I₂, i) = (i - 1) * n₁ * n₂ + (I₂ - 1) * n₁ + (I₁ - 1) + 1
t₀ = time()
H = _matrix_H(M)
F = _vector_F(M)
t₁ = time()
N = 2n₁ * n₂
_fixed = sort(collect((i -> lineup(i...)).(fix_method(n₁, n₂))))
_unfixed = deleteat!(collect(1:N), _fixed)
F = reshape(F, N)
H = reshape(H, N, N)
𝒂 = 𝒂ₒ = reshape(𝒂, N)
Ȟ = H[_unfixed, _unfixed]
𝒂̌ = 𝒂[_unfixed]
F̌ = F[_unfixed]
Ǧ = Ȟ \ F̌
𝒂̌ = 𝒂̌ - Ǧ
for i in _fixed
insert!(𝒂̌, i, 𝒂ₒ[i])
end
𝒂 = reshape(𝒂̌, n₁, n₂, 2)
M = BSplineManifold(_array2arrayofvector(𝒂), P)
return M, F, Ǧ, t₁ - t₀
end
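# Assemble the Hessian matrix H of the elastic strain energy with respect to the control points
# (Galerkin discretization; 10-point Gauss–Legendre quadrature on each knot span).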
function _matrix_H(M::BSplineManifold{2,p}) where {p}
rrr = StaticArrays.SUnitRange{1,10}()
𝒂 = controlpoints(M)
P₁, P₂ = P = bsplinespaces(M)
p₁, p₂ = p
k₁, k₂ = k = knotvector.(P)
l₁, l₂ = length.(k)
n₁, n₂ = dim.(P)
H = zeros(n₁, n₂, 2, n₁, n₂, 2)
_nodes, _weights = gausslegendre(10)
nodes = SVector{10,Float64}(_nodes)
weights = SVector{10,Float64}(_weights)
nodes₁ = nodes
nodes₂ = nodes
weights₁ = weights
weights₂ = weights
for s₁ in 1:l₁-1, s₂ in 1:l₂-1
a₁ = k₁[s₁]
b₁ = k₁[s₁+1]
a₂ = k₂[s₂]
b₂ = k₂[s₂+1]
w₁ = b₁ - a₁
w₂ = b₂ - a₂
iszero(w₁) && continue
iszero(w₂) && continue
dnodes₁ = (w₁ * nodes₁ .+ (a₁ + b₁)) / 2
dnodes₂ = (w₂ * nodes₂ .+ (a₂ + b₂)) / 2
for ii1 in rrr, ii2 in rrr
u¹, u² = dnodes₁[ii1], dnodes₂[ii2]
g₁₁ = g₍₀₎₁₁(u¹, u²)
g₁₂ = g₂₁ = g₍₀₎₁₂(u¹, u²)
g₂₂ = g₍₀₎₂₂(u¹, u²)
g = @SMatrix [g₁₁ g₁₂; g₂₁ g₂₂]
g⁻ = inv(g)
𝝊 = sqrt(det(g))
B₁ = bsplinebasisall(P₁, s₁ - p₁, u¹)
B₂ = bsplinebasisall(P₂, s₂ - p₂, u²)
Ḃ₁ = bsplinebasisall(BSplineDerivativeSpace{1}(P₁), s₁ - p₁, u¹)
Ḃ₂ = bsplinebasisall(BSplineDerivativeSpace{1}(P₂), s₂ - p₂, u²)
Q₁ = sum(𝒂[J₁+(s₁-p₁)-1, J₂+(s₂-p₂)-1] * Ḃ₁[J₁] * B₂[J₂] for J₁ in 1:p₁+1, J₂ in 1:p₂+1)
Q₂ = sum(𝒂[J₁+(s₁-p₁)-1, J₂+(s₂-p₂)-1] * B₁[J₁] * Ḃ₂[J₂] for J₁ in 1:p₁+1, J₂ in 1:p₂+1)
Q = hcat(Q₁, Q₂)
QQ = @SMatrix [Q[1, m] * Q[1, n] + Q[2, m] * Q[2, n] for m in 1:2, n in 1:2]
weight1 = weights₁[ii1]
weight2 = weights₂[ii2]
C¹¹¹¹ = C(1, 1, 1, 1, g⁻)
C¹¹¹² = C(1, 1, 1, 2, g⁻)
C¹¹²² = C(1, 1, 2, 2, g⁻)
C¹²¹² = C(1, 2, 1, 2, g⁻)
C¹²²² = C(1, 2, 2, 2, g⁻)
C²²²² = C(2, 2, 2, 2, g⁻)
C¹¹²¹ = C¹²¹¹ = C²¹¹¹ = C¹¹¹²
C²²¹¹ = C¹¹²²
C¹²²¹ = C²¹¹² = C²¹²¹ = C¹²¹²
C²¹²² = C²²¹² = C²²²¹ = C¹²²²
for i₁ in 1:p₁+1, i₂ in 1:p₂+1, i in 1:2, r₁ in 1:p₁+1, r₂ in 1:p₂+1, r in 1:2
I₁ = i₁ + (s₁ - p₁) - 1
R₁ = r₁ + (s₁ - p₁) - 1
I₂ = i₂ + (s₂ - p₂) - 1
R₂ = r₂ + (s₂ - p₂) - 1
Ni₁ = Ḃ₁[i₁] * B₂[i₂]
Ni₂ = B₁[i₁] * Ḃ₂[i₂]
Nr₁ = Ḃ₁[r₁] * B₂[r₂]
Nr₂ = B₁[r₁] * Ḃ₂[r₂]
s = C¹¹¹¹ * Ni₁ * Nr₁ * Q₁[i] * Q₁[r]
s += C¹¹¹² * Ni₁ * Nr₂ * Q₁[i] * Q₁[r]
s += C¹¹²¹ * Ni₁ * Nr₁ * Q₁[i] * Q₂[r]
s += C¹¹²² * Ni₁ * Nr₂ * Q₁[i] * Q₂[r]
s += C¹²¹¹ * Ni₁ * Nr₁ * Q₂[i] * Q₁[r]
s += C¹²¹² * Ni₁ * Nr₂ * Q₂[i] * Q₁[r]
s += C¹²²¹ * Ni₁ * Nr₁ * Q₂[i] * Q₂[r]
s += C¹²²² * Ni₁ * Nr₂ * Q₂[i] * Q₂[r]
s += C²¹¹¹ * Ni₂ * Nr₁ * Q₁[i] * Q₁[r]
s += C²¹¹² * Ni₂ * Nr₂ * Q₁[i] * Q₁[r]
s += C²¹²¹ * Ni₂ * Nr₁ * Q₁[i] * Q₂[r]
s += C²¹²² * Ni₂ * Nr₂ * Q₁[i] * Q₂[r]
s += C²²¹¹ * Ni₂ * Nr₁ * Q₂[i] * Q₁[r]
s += C²²¹² * Ni₂ * Nr₂ * Q₂[i] * Q₁[r]
s += C²²²¹ * Ni₂ * Nr₁ * Q₂[i] * Q₂[r]
s += C²²²² * Ni₂ * Nr₂ * Q₂[i] * Q₂[r]
if i == r
s += C¹¹¹¹ * Ni₁ * Nr₁ * (QQ[1, 1] - g₁₁) / 2
s += C¹¹¹² * Ni₁ * Nr₁ * (QQ[1, 2] - g₁₂) / 2
s += C¹¹²¹ * Ni₁ * Nr₁ * (QQ[2, 1] - g₂₁) / 2
s += C¹¹²² * Ni₁ * Nr₁ * (QQ[2, 2] - g₂₂) / 2
s += C¹²¹¹ * Ni₁ * Nr₂ * (QQ[1, 1] - g₁₁) / 2
s += C¹²¹² * Ni₁ * Nr₂ * (QQ[1, 2] - g₁₂) / 2
s += C¹²²¹ * Ni₁ * Nr₂ * (QQ[2, 1] - g₂₁) / 2
s += C¹²²² * Ni₁ * Nr₂ * (QQ[2, 2] - g₂₂) / 2
s += C²¹¹¹ * Ni₂ * Nr₁ * (QQ[1, 1] - g₁₁) / 2
s += C²¹¹² * Ni₂ * Nr₁ * (QQ[1, 2] - g₁₂) / 2
s += C²¹²¹ * Ni₂ * Nr₁ * (QQ[2, 1] - g₂₁) / 2
s += C²¹²² * Ni₂ * Nr₁ * (QQ[2, 2] - g₂₂) / 2
s += C²²¹¹ * Ni₂ * Nr₂ * (QQ[1, 1] - g₁₁) / 2
s += C²²¹² * Ni₂ * Nr₂ * (QQ[1, 2] - g₁₂) / 2
s += C²²²¹ * Ni₂ * Nr₂ * (QQ[2, 1] - g₂₁) / 2
s += C²²²² * Ni₂ * Nr₂ * (QQ[2, 2] - g₂₂) / 2
end
s *= 𝝊 * weight1 * weight2 * w₁ * w₂ / 2
H[I₁, I₂, i, R₁, R₂, r] += s
end
end
end
return H
end
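# Assemble the residual vector F (the gradient of the elastic strain energy with respect to
# the control points), used as the right-hand side of the Newton step.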
function _vector_F(M::BSplineManifold{2,p}) where {p}
rrr = StaticArrays.SUnitRange{1,10}()
𝒂 = controlpoints(M)
P₁, P₂ = P = bsplinespaces(M)
p₁, p₂ = p
k₁, k₂ = k = knotvector.(P)
l₁, l₂ = length.(k)
n₁, n₂ = dim.(P)
F = zeros(n₁, n₂, 2)
_nodes, _weights = gausslegendre(10)
nodes = SVector{10,Float64}(_nodes)
weights = SVector{10,Float64}(_weights)
nodes₁ = nodes
nodes₂ = nodes
weights₁ = weights
weights₂ = weights
for s₁ in 1:l₁-1, s₂ in 1:l₂-1
a₁ = k₁[s₁]
b₁ = k₁[s₁+1]
a₂ = k₂[s₂]
b₂ = k₂[s₂+1]
w₁ = b₁ - a₁
w₂ = b₂ - a₂
iszero(w₁) && continue
iszero(w₂) && continue
dnodes₁ = (w₁ * nodes₁ .+ (a₁ + b₁)) / 2
dnodes₂ = (w₂ * nodes₂ .+ (a₂ + b₂)) / 2
for ii1 in rrr, ii2 in rrr
u¹, u² = dnodes₁[ii1], dnodes₂[ii2]
g₁₁ = g₍₀₎₁₁(u¹, u²)
g₁₂ = g₂₁ = g₍₀₎₁₂(u¹, u²)
g₂₂ = g₍₀₎₂₂(u¹, u²)
g = @SMatrix [g₁₁ g₁₂; g₂₁ g₂₂]
g⁻ = inv(g)
𝝊 = sqrt(det(g))
B₁ = bsplinebasisall(P₁, s₁ - p₁, u¹)
B₂ = bsplinebasisall(P₂, s₂ - p₂, u²)
Ḃ₁ = bsplinebasisall(BSplineDerivativeSpace{1}(P₁), s₁ - p₁, u¹)
Ḃ₂ = bsplinebasisall(BSplineDerivativeSpace{1}(P₂), s₂ - p₂, u²)
Q₁ = sum(𝒂[J₁+(s₁-p₁)-1, J₂+(s₂-p₂)-1] * Ḃ₁[J₁] * B₂[J₂] for J₁ in 1:p₁+1, J₂ in 1:p₂+1)
Q₂ = sum(𝒂[J₁+(s₁-p₁)-1, J₂+(s₂-p₂)-1] * B₁[J₁] * Ḃ₂[J₂] for J₁ in 1:p₁+1, J₂ in 1:p₂+1)
Q = hcat(Q₁, Q₂)
QQ = @SMatrix [Q[1, m] * Q[1, n] + Q[2, m] * Q[2, n] for m in 1:2, n in 1:2]
weight1 = weights₁[ii1]
weight2 = weights₂[ii2]
C¹¹¹¹ = C(1, 1, 1, 1, g⁻)
C¹¹¹² = C(1, 1, 1, 2, g⁻)
C¹¹²² = C(1, 1, 2, 2, g⁻)
C¹²¹² = C(1, 2, 1, 2, g⁻)
C¹²²² = C(1, 2, 2, 2, g⁻)
C²²²² = C(2, 2, 2, 2, g⁻)
C¹¹²¹ = C¹²¹¹ = C²¹¹¹ = C¹¹¹²
C²²¹¹ = C¹¹²²
C¹²²¹ = C²¹¹² = C²¹²¹ = C¹²¹²
C²¹²² = C²²¹² = C²²²¹ = C¹²²²
for i₁ in 1:p₁+1, i₂ in 1:p₂+1, i in 1:2
I₁ = i₁ + (s₁ - p₁) - 1
I₂ = i₂ + (s₂ - p₂) - 1
Ni₁ = Ḃ₁[i₁] * B₂[i₂]
Ni₂ = B₁[i₁] * Ḃ₂[i₂]
s = C¹¹¹¹ * Ni₁ * Q₁[i] * (QQ[1, 1] - g₁₁) / 2
s += C¹¹¹² * Ni₁ * Q₁[i] * (QQ[1, 2] - g₁₂) / 2
s += C¹¹²¹ * Ni₁ * Q₁[i] * (QQ[2, 1] - g₂₁) / 2
s += C¹¹²² * Ni₁ * Q₁[i] * (QQ[2, 2] - g₂₂) / 2
s += C¹²¹¹ * Ni₁ * Q₂[i] * (QQ[1, 1] - g₁₁) / 2
s += C¹²¹² * Ni₁ * Q₂[i] * (QQ[1, 2] - g₁₂) / 2
s += C¹²²¹ * Ni₁ * Q₂[i] * (QQ[2, 1] - g₂₁) / 2
s += C¹²²² * Ni₁ * Q₂[i] * (QQ[2, 2] - g₂₂) / 2
s += C²¹¹¹ * Ni₂ * Q₁[i] * (QQ[1, 1] - g₁₁) / 2
s += C²¹¹² * Ni₂ * Q₁[i] * (QQ[1, 2] - g₁₂) / 2
s += C²¹²¹ * Ni₂ * Q₁[i] * (QQ[2, 1] - g₂₁) / 2
s += C²¹²² * Ni₂ * Q₁[i] * (QQ[2, 2] - g₂₂) / 2
s += C²²¹¹ * Ni₂ * Q₂[i] * (QQ[1, 1] - g₁₁) / 2
s += C²²¹² * Ni₂ * Q₂[i] * (QQ[1, 2] - g₁₂) / 2
s += C²²²¹ * Ni₂ * Q₂[i] * (QQ[2, 1] - g₂₁) / 2
s += C²²²² * Ni₂ * Q₂[i] * (QQ[2, 2] - g₂₂) / 2
s *= 𝝊 * weight1 * weight2 * w₁ * w₂ / 2
F[I₁, I₂, i] += s
end
end
end
return F
end
| ElasticSurfaceEmbedding | https://github.com/hyrodium/ElasticSurfaceEmbedding.jl.git |
|
[
"MIT"
] | 0.1.0 | 5c9e72c191fadb9ac84137a4a02806a73e63d2c7 | code | 441 | """
pin!(steptree, index::Int = 0)
Add a pin 📌 to the step with the given index.
"""
function pin!(steptree, index::Int = 0)
index = _validindex(steptree, index)
steptree.pinned[index] = true
return steptree
end
"""
unpin!(steptree, index::Int = 0)
Remove the pin 📌 from the step with the given index.
"""
function unpin!(steptree, index::Int = 0)
index = _validindex(steptree, index)
steptree.pinned[index] = false
return steptree
end
| ElasticSurfaceEmbedding | https://github.com/hyrodium/ElasticSurfaceEmbedding.jl.git |
|
[
"MIT"
] | 0.1.0 | 5c9e72c191fadb9ac84137a4a02806a73e63d2c7 | code | 6962 | using Test
using IntervalSets
using StaticArrays
using Images
using LinearAlgebra
using BasicBSpline
using ElasticSurfaceEmbedding
using Aqua
import ElasticSurfaceEmbedding.𝝂
import ElasticSurfaceEmbedding.𝒑₁₍ₜ₎
import ElasticSurfaceEmbedding.𝒑₂₍ₜ₎
Aqua.test_all(ElasticSurfaceEmbedding; ambiguities=false)
function L²(f, B)
n = 240
𝟙 = 0.99999
xs = range(-B * 𝟙, stop = B * 𝟙, length = n + 1)
Δ = 2 * B * 𝟙 / n
return sqrt(Δ * (2 * sum(f.(xs) .^ 2) - f(xs[begin])^2 - f(xs[end])^2) / 2)
end
function L²(f, g, B)
return L²(x -> f(x) - g(x), B)
end
function delta(f, B)
n = 10
𝟙 = 1 - 1e-8
xs = range(-B * 𝟙, stop = B * 𝟙, length = n + 1)
return maximum(f.(xs)) - minimum(f.(xs))
end
dir_result = joinpath(@__DIR__, "result")
rm(dir_result, recursive = true, force = true)
@testset "Rhomboid" begin
ElasticSurfaceEmbedding.𝒑₍₀₎(u¹, u²) = SVector(u¹, u², u¹ + u²)
D = (-1.0 .. 1.0, -1.0 .. 1.0)
show_strain(D)
@test_logs (:info, "Strain - domain: [-1.0, 1.0]×[-1.0, 1.0]\nPredicted: (min: -0.0, max: 0.0)\n") show_strain(D)
result = initial_state(D)
M = ElasticSurfaceEmbedding.loadM(result)
𝒂 = controlpoints(M)
M, N = size(𝒂)
m = M ÷ 2 + 1
n = N ÷ 2 + 1
@test 𝒂[1, 1] ≈ [-√(3 / 2), -3 / √(2)]
@test 𝒂[1, n] ≈ [-√(3 / 2), -1 / √(2)]
@test 𝒂[1, N] ≈ [-√(3 / 2), 1 / √(2)]
@test 𝒂[m, 1] ≈ [0, -2 / √(2)]
@test 𝒂[m, n] ≈ [0, 0] atol = 1e-14
@test 𝒂[m, N] ≈ [0, 2 / √(2)]
@test 𝒂[M, 1] ≈ [√(3 / 2), -1 / √(2)]
@test 𝒂[M, n] ≈ [√(3 / 2), 1 / √(2)]
@test 𝒂[M, N] ≈ [√(3 / 2), 3 / √(2)]
newton_onestep!(result)
M = ElasticSurfaceEmbedding.loadM(result)
𝒂 = controlpoints(M)
M, N = size(𝒂)
m = M ÷ 2 + 1
n = N ÷ 2 + 1
@test 𝒂[1, 1] ≈ [-√(3 / 2), -3 / √(2)]
@test 𝒂[1, n] ≈ [-√(3 / 2), -1 / √(2)]
@test 𝒂[1, N] ≈ [-√(3 / 2), 1 / √(2)]
@test 𝒂[m, 1] ≈ [0, -2 / √(2)]
@test 𝒂[m, n] ≈ [0, 0] atol = 1e-14
@test 𝒂[m, N] ≈ [0, 2 / √(2)]
@test 𝒂[M, 1] ≈ [√(3 / 2), -1 / √(2)]
@test 𝒂[M, n] ≈ [√(3 / 2), 1 / √(2)]
@test 𝒂[M, N] ≈ [√(3 / 2), 3 / √(2)]
end
@testset "Planar" begin
ElasticSurfaceEmbedding.𝒑₍₀₎(u¹, u²) = SVector(sin(u¹) * u², u² + cos(u¹) - u¹^2 / 5, 0.0)
# See https://www.desmos.com/calculator/4usvqpr0iu
D = (-1.0 .. 2.0, 1.0 .. 1.2)
show_strain(D)
result = initial_state(D)
M = ElasticSurfaceEmbedding.loadM(result)
@test norm([ElasticSurfaceEmbedding.E(M, u¹, u²) for u¹ in -0.9:0.1:1.9, u² in 1.05:0.05:1.15], Inf) < 1e-4
newton_onestep!(result)
refinement!(result, p₊=(0,1), k₊=suggest_knotvector(result))
newton_onestep!(result)
M = ElasticSurfaceEmbedding.loadM(result)
@test norm([ElasticSurfaceEmbedding.E(M, u¹, u²) for u¹ in -0.9:0.1:1.9, u² in 1.05:0.05:1.15], Inf) < 1e-5
@test result.pinned[2] == false
pin!(result, 2)
@test result.pinned[2] == true
unpin!(result, 2)
@test result.pinned[2] == false
end
@testset "Sphere-thin" begin
# For deriving analytical solution, see https://hackmd.io/@hyrodium/r1sCtEsLX
L = 20
B = 1 / 8
ElasticSurfaceEmbedding.𝒑₍₀₎(u¹, u²) = SVector(cos(u¹) * cos(u²), sin(u¹) * cos(u²), sin(u²))
D = (-L .. L, -B .. B)
show_strain(D)
result = initial_state(D)
newton_onestep!(result)
newton_onestep!(result)
refinement!(
result,
p₊ = (0, 1),
k₊ = (KnotVector([-L + B, -L + 2B, -L + 3B, L - 3B, L - 2B, L - B]), KnotVector([-B / 2, 0.0, B / 2])),
)
newton_onestep!(result)
newton_onestep!(result)
M = ElasticSurfaceEmbedding.loadM(result)
𝒂 = controlpoints(M)
# Analytical
k = sqrt(4atanh(tan(B / 2)) / (sin(B) / cos(B)^2 + 2atanh(tan(B / 2))))
# Numerical computed
k̃ = 𝒑₁₍ₜ₎(M, 0, 0)[1]
# Approximated
k̂ = 1 - B^2 / 6
# If the strip is thin, the analytical result k can be approximated with k̂.
@test abs(log(k̃ / k)) < 1e-4
@test abs(log(k̂ / k)) < 1e-4
# Analytical
h′(u²) = √(1 - 𝝂 * (k^2 / cos(u²)^2 - 1))
# Numerical computed
h̃′(u²) = 𝒑₂₍ₜ₎(M, 0, u²)[2]
# Approximated
ĥ′(u²) = √(1 + 𝝂 * (1 - k̂^2)) - (𝝂 * k̂^2 * u²^2) / (2 * √(1 + 𝝂 * (1 - k̂^2)))
# If the strip is thin, the analytical result h′ can be approximated with ĥ′.
@test L²(h′, h̃′, B) / delta(h′, B) < 1e-2
@test L²(h′, ĥ′, B) / delta(h′, B) < 1e-2
end
@testset "Sphere-thick" begin
# For deriving analytical solution, see https://hackmd.io/@hyrodium/r1sCtEsLX
L = 20
B = 2 / 3
ElasticSurfaceEmbedding.𝒑₍₀₎(u¹, u²) = SVector(cos(u¹) * cos(u²), sin(u¹) * cos(u²), sin(u²))
D = (-L .. L, -B .. B)
show_strain(D)
result = initial_state(D)
newton_onestep!(result)
newton_onestep!(result)
refinement!(
result,
p₊ = (0, 1),
k₊ = (KnotVector([-L + B, -L + 2B, -L + 3B, L - 3B, L - 2B, L - B]), KnotVector([-B / 2, 0.0, B / 2])),
)
newton_onestep!(result)
newton_onestep!(result)
M = ElasticSurfaceEmbedding.loadM(result)
𝒂 = controlpoints(M)
# Analytical
k = sqrt(4atanh(tan(B / 2)) / (sin(B) / cos(B)^2 + 2atanh(tan(B / 2))))
# Numerical computed
k̃ = 𝒑₁₍ₜ₎(M, 0, 0)[1]
# Approximated
k̂ = 1 - B^2 / 6
# If the strip is thick, the analytical result k cannot be approximated with k̂.
@test abs(log(k̃ / k)) < 1e-4
@test abs(log(k̂ / k)) > 1e-4
# Analytical
h′(u²) = √(1 - 𝝂 * (k^2 / cos(u²)^2 - 1))
# Numerical computed
h̃′(u²) = 𝒑₂₍ₜ₎(M, 0, u²)[2]
# Approximated
ĥ′(u²) = √(1 + 𝝂 * (1 - k̂^2)) - (𝝂 * k̂^2 * u²^2) / (2 * √(1 + 𝝂 * (1 - k̂^2)))
# If the strip is thick, the analytical result h′ cannot be approximated with ĥ′.
@test L²(h′, h̃′, B) / delta(h′, B) < 1e-2
@test L²(h′, ĥ′, B) / delta(h′, B) > 1e-2
## Note
# Try the following script to check the difference between analytical solution and numerical solution.
# using Plots
# 𝟙 = 1 - 1e-8
# plot(h′,-B*𝟙,B*𝟙)
# plot!(h̃′,-B*𝟙,B*𝟙)
# plot!(ĥ′,-B*𝟙,B*𝟙)
end
@testset "Paraboloid" begin
ElasticSurfaceEmbedding.𝒑₍₀₎(u¹, u²) = SVector(u¹, u², u¹^2 + u²^2)
name = "Paraboloid"
N = 10
result = StepTree()
for i in 1:N
D = (-1.0 .. 1.0, (i - 1) / N .. i / N)
show_strain(D)
result = initial_state!(result, D)
newton_onestep!(result, fixingmethod = :fix3points)
newton_onestep!(result)
refinement!(result, p₊ = (0, 1), k₊ = (EmptyKnotVector(), KnotVector([(i - 1 / 2) / 10])))
newton_onestep!(result)
newton_onestep!(result)
pin!(result)
end
export_all_steps(joinpath(dir_result, "Paraboloid"), result)
files_pinned = readdir(joinpath(dir_result, "Paraboloid", "pinned"))
@test length(files_pinned) == N
# img_b = load(joinpath(dir_result,"Paraboloid","append","Paraboloid-5_append.png"))
# d = Euclidean()
# @test d(RGB.(img_a), RGB.(img_b)) < 0.0001
end
| ElasticSurfaceEmbedding | https://github.com/hyrodium/ElasticSurfaceEmbedding.jl.git |
|
[
"MIT"
] | 0.1.0 | 5c9e72c191fadb9ac84137a4a02806a73e63d2c7 | docs | 5313 | # Elastic Surface Embedding; Weaving Parer Strips
[](https://hyrodium.github.io/ElasticSurfaceEmbedding.jl/dev)
[](https://github.com/hyrodium/ElasticSurfaceEmbedding.jl/actions)
[](https://codecov.io/gh/hyrodium/ElasticSurfaceEmbedding.jl)
[](https://github.com/JuliaTesting/Aqua.jl)
[](https://arxiv.org/abs/2211.06372)
## JuliaCon2023 Talk! 📣🕙
I gave a lightning talk about this repository at JuliaCon2023!
[](https://www.youtube.com/watch?v=0gRVPLfZl7w)
* [Pretalx page](https://pretalx.com/juliacon2023/talk/RBHAER/)
* [Slides](https://www.docswell.com/s/hyrodium/5JL8EQ-JuliaCon2023)
## TL;DR
You can make a *holdable* smooth surface model with this repository.

The main part of this project is how to determine a planar shape from a strip on the target curved surface.
In mathematics, this mapping is called "embedding".
We determined the embedding by minimizing its elastic strain energy.
This is the meaning of "Elastic Surface Embedding".
## Overview: How to make a surface model
### step 1: Define a shape of a surface (and split it into strips)
The definition must consist of a parametric mapping and its domain.
For example, a paraboloid can be parametrized as below.
$$
\begin{aligned}
\boldsymbol{p}_{[0]}(u^1,u^2)
&= \begin{pmatrix}
u^1 \\
u^2 \\
(u^1)^2+(u^2)^2
\end{pmatrix} &
(u^1, u^2)
\in [-1,1] \times [-1,1]
\end{aligned}
$$
The domain will be split into $D^{(i)}$.
$$
\begin{aligned}
D^{(i)}
= [-1,1] \times \left[\frac{i-1}{10}, \frac{i}{10}\right] \qquad (i = 1,...,10)
\end{aligned}
$$
### step 2: Numerical analysis
This is the main part.
Split the surface into strips, and compute the embeddings.
```julia
using ElasticSurfaceEmbedding
using IntervalSets
using StaticArrays
# Overload the shape definition
ElasticSurfaceEmbedding.surface(x,y) = SVector(x, y, x^2+y^2)
# (1) split the surface into strips
dom = [(-1..1, (i-1)/10..i/10) for i in 1:10]
# (2) Embed the strips onto a plane
res = auto_allsteps(dom)
export_pinned_steps("paraboloid", res)
```
For more information, read [this document](https://hyrodium.github.io/ElasticSurfaceEmbedding.jl/dev/run-julia/).
The image below is a result for the domain $D^{(1)}$.
<img src="docs/src/img/bspline_strain.png" width="800">
### step 3: Edit on your favorite vector graphics editor
The output files are in SVG format.
After editing the SVG files, you can print the graphics or cut the paper with a laser cutting machine.
<img src="docs/src/img/inkscape.png" width="800">
### step 4: Craft a paper model
This is the final step.
Cut the paper into strips, and weave them into the surface.
<img src="docs/src/img/assembling.png" width="800">
## Directions: If you like..
### ..making crafts :scissors:
| <img src="docs/src/img/craft.png" align="top" height="150" width="150"> | Print Appendix B from [my paper on arXiv](https://arxiv.org/abs/2211.06372), and <a href="https://hyrodium.github.io/ElasticSurfaceEmbedding.jl/dev/craft/">make your own surface model. <br> Laser cutting machine is useful, but it's not necessary. |
| --- | :-- |
### ..computing :octocat:
| <img src="docs/src/img/juliawolfram.png" align="top" height="150" width="150"> | Clone this repository, and run the [Julia code](https://hyrodium.github.io/ElasticSurfaceEmbedding.jl/dev/run-julia/) or [Wolfram code](https://github.com/hyrodium/ElasticSurfaceEmbedding-wolfram)! <br> Any issues and pull requests are welcomed. |
| --- | :-- |
### ..mathematics or physics :globe_with_meridians:
| <img src="docs/src/img/math.png" align="top" height="150" width="150"> | Read [our paper on arXiv](https://arxiv.org/abs/2211.06372). Here's our theoretical framework: <br> ・Mathematical model: [Nonlinear elasticity](https://www.sciencedirect.com/topics/engineering/geometric-nonlinearity) on [Riemannian manifold](https://en.m.wikipedia.org/wiki/Riemannian_manifold) <br> ・Geometric representation: [B-spline manifold](https://en.wikipedia.org/wiki/Non-uniform_rational_B-spline) <br> ・Numerical analysis: [Galerkin method](https://en.wikipedia.org/wiki/Galerkin_method), [Newton-Raphson method](https://en.wikipedia.org/wiki/Newton%27s_method) |
| --- | :-- |
### ..me! :bowtie:
| <img src="docs/src/img/me.jpg" align="top" height="150" width="150"> | Follow [my twitter account](https://twitter.com/Hyrodium). <br> Visit [my website](https://hyrodium.github.io/). <br> Read [my paper on arXiv](https://arxiv.org/abs/2211.06372). |
| --- | :-- |
## Gallery
<img src="docs/src/img/Paraboloid1.png" width="160"> <img src="docs/src/img/Paraboloid2.png" width="160"> <img src="docs/src/img/Paraboloid3.png" width="160"> <img src="docs/src/img/Paraboloid4.jpg" width="160"> <img src="docs/src/img/Paraboloid5.png" width="160">
<img src="docs/src/img/CatenoidHelicoid.gif" width="400">
<img src="docs/src/img/stereographicprojection.png" width="800">
| ElasticSurfaceEmbedding | https://github.com/hyrodium/ElasticSurfaceEmbedding.jl.git |
|
[
"MIT"
] | 0.1.0 | 5c9e72c191fadb9ac84137a4a02806a73e63d2c7 | docs | 42 | # [Gallery](@id gallery)
{{{democards}}}
| ElasticSurfaceEmbedding | https://github.com/hyrodium/ElasticSurfaceEmbedding.jl.git |
|
[
"MIT"
] | 0.1.0 | 5c9e72c191fadb9ac84137a4a02806a73e63d2c7 | docs | 60 | # API
```@autodocs
Modules = [ElasticSurfaceEmbedding]
```
| ElasticSurfaceEmbedding | https://github.com/hyrodium/ElasticSurfaceEmbedding.jl.git |
|
[
"MIT"
] | 0.1.0 | 5c9e72c191fadb9ac84137a4a02806a73e63d2c7 | docs | 888 | # [Craft](@id craft)
## Question: How can I make the model?
You can:
* Download a paraboloid example or a hyperbolic paraboloid example from [my paper on arXiv](https://arxiv.org/abs/2211.06372), and cut it out with scissors.
* It would be hard, but [not impossible](https://hyrodium.tumblr.com/post/178719972384).
* Download a paraboloid example or a hyperbolic paraboloid example from [my paper on arXiv](https://arxiv.org/abs/2211.06372), and cut it with a laser cutting machine.
* I'm using [Beambox by FLUX.inc](https://flux3dp.com/beambox/).
* [Compute the shape of paper strip](@ref numerical_computation), instead of downloading the paraboloid.
* This needs setup for Julia environment.
* Buy already cut pieces of paper at [Booth](https://hyrodium.booth.pm/) (In preparation).
* Ships from Japan.
## Images during the making process


| ElasticSurfaceEmbedding | https://github.com/hyrodium/ElasticSurfaceEmbedding.jl.git |
|
[
"MIT"
] | 0.1.0 | 5c9e72c191fadb9ac84137a4a02806a73e63d2c7 | docs | 4267 | # Elastic Surface Embedding
## TL;DR
You can make a *holdable* smooth surface model with this repository.

The main part of this project is how to determine a planar shape from a strip on a curved surface.
In mathematics, this mapping is called "embedding".
We determined the embedding by minimizing its elastic strain energy.
This is the meaning of "Elastic Surface Embedding".
## Overview: How to make a surface model
### step 1 : Define a shape of a surface (and split it into strips)
The definition must consist of a parametric mapping and its domain.
For example, a paraboloid can be parametrized as below.
```math
\begin{aligned}
\bm{p}_{[0]}(u^1, u^2)
&= \begin{pmatrix}
u^1 \\
u^2 \\
(u^1)^2 + (u^2)^2
\end{pmatrix} \\
D
&= [-1,1]\times[-1,1]
\end{aligned}
```
The domain ``D`` will be split into ``D_i``.
```math
\begin{aligned}
D_i
&= [-1,1]\times\left[\frac{i-1}{10},\frac{i}{10}\right] & (i=1,\dots,10)
\end{aligned}
```
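For reference, here is a minimal sketch of how this definition and split can be written in Julia with this package (the full workflow is described in the numerical computation section below):

```julia
using IntervalSets
using StaticArrays
using ElasticSurfaceEmbedding

# Overload the reference-state parametrization with the paraboloid above
ElasticSurfaceEmbedding.𝒑₍₀₎(u¹, u²) = SVector(u¹, u², u¹^2 + u²^2)

# The strip domains Dᵢ (i = 1, …, 10)
D(i, n) = (-1.0..1.0, (i-1)/n..i/n)
domains = [D(i, 10) for i in 1:10]
```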
### step 2 : Numerical analysis
This is the main part.
Split the surface into pieces, and compute the Euclidean embedding.
For more information, read the [numerical computation section](@ref numerical_computation).
The image below is a result for the domain ``D_1``.

### step 3 : Edit on a vector graphics editor
The output files are in SVG format.
After editing the SVG files, you can print the graphics or cut the paper with a laser cutting machine.

### step 4 : Craft a paper model
This is the final step.
Cut the paper into strips, and weave them into the surface.

## Directions: If you like..
### ..making crafts ✂️
```@raw html
<div style="display:table; width:100%;">
<div style="display:table-cell; width:160px;">
<img src="img/craft_thumbnail.png">
</div>
<div style="display:table-cell; vertical-align:middle;">
Download and print a paraboloid example or a hyperbolic paraboloid example from <a href="https://arxiv.org/abs/2211.06372">my paper on arXiv</a>, and <a href="../craft">make your own surface model</a>.
Laser cutting machine is useful, but it's not necessary.
</div>
</div>
```
### ..computing 💻
```@raw html
<div style="display:table; width:100%;">
<div style="display:table-cell; width:160px;">
<img src="img/juliawolfram_thumbnail.png">
</div>
<div style="display:table-cell; vertical-align:middle;">
Clone this repository, and run <a href="../run-julia">the Julia script</a> or <a href="../run-wolfram">the Wolfram script</a>!
Any issues and pull requests are welcomed.
</div>
</div>
```
### ..mathematics or physics 🌐
```@raw html
<div style="display:table; width:100%;">
<div style="display:table-cell; width:160px;">
<img src="img/math_thumbnail.png">
</div>
<div style="display:table-cell; vertical-align:middle;">
Read <a href="https://arxiv.org/abs/2211.06372">my paper on arXiv</a>. Here's our theoretical framework:
<ul>
<li>Mathematical model: <a href="https://www.sciencedirect.com/topics/engineering/geometric-nonlinearity">Nonlinear elasticity</a> on <a href="https://en.m.wikipedia.org/wiki/Riemannian_manifold">Riemannian manifold</a></li>
<li>Geometric representation: <a href="https://en.wikipedia.org/wiki/Non-uniform_rational_B-spline">B-spline manifold</a></li>
<li>Numerical analysis: <a href="https://en.wikipedia.org/wiki/Galerkin_method">Galerkin method</a>, <a href="https://en.wikipedia.org/wiki/Newton%27s_method">Newton-Raphson method</a></li>
</ul>
</div>
</div>
```
### ..me! 🐢
```@raw html
<div style="display:table; width:100%;">
<div style="display:table-cell; width:160px;">
<img src="img/me_thumbnail.jpg">
</div>
<div style="display:table-cell; vertical-align:middle;">
<ul>
<li>Follow <a href="https://twitter.com/Hyrodium">my twitter account</a>!</li>
<li>Visit <a href="https://hyrodium.github.io/">my website</a>!</li>
<li>Read <a href="https://arxiv.org/abs/2211.06372">my paper on arXiv</a>!</li>
<li>Give star to <a href="https://github.com/hyrodium/ElasticSurfaceEmbedding.jl">this repository</a>!</li>
</ul>
</div>
</div>
```
| ElasticSurfaceEmbedding | https://github.com/hyrodium/ElasticSurfaceEmbedding.jl.git |
|
[
"MIT"
] | 0.1.0 | 5c9e72c191fadb9ac84137a4a02806a73e63d2c7 | docs | 5048 | # [Numerical computation](@id numerical_computation)
## Installation
In Julia's package mode, run the following commands.
```julia-repl
pkg> add IntervalSets
pkg> add StaticArrays
pkg> add BasicBSpline
pkg> add https://github.com/hyrodium/BasicBSplineExporter.jl
pkg> add https://github.com/hyrodium/ElasticSurfaceEmbedding.jl
```
## Overview of our method
Our theoretical framework is based on:
* Mathematical model: Nonlinear elasticity on Riemannian manifold
* Geometric representation: B-spline manifold
* Numerical analysis: Galerkin method, Newton-Raphson method
The computation process proceeds as shown in the following flowchart (from our paper):

For more information, read [our paper](https://arxiv.org/abs/2211.06372) or contact [me](https://twitter.com/Hyrodium)!
## Example: Paraboloid
Throughout this section, we treat the paraboloid ``z=x^2+y^2`` as an example.

### Load packages, and optional configuration
Load packages with the following script.
```@example paraboloid
using IntervalSets
using BasicBSpline
using StaticArrays
using ElasticSurfaceEmbedding
```
### Define the shape of surface
```@example paraboloid
ElasticSurfaceEmbedding.𝒑₍₀₎(u¹,u²) = SVector(u¹, u², u¹^2+u²^2)
```
```math
\begin{aligned}
\bm{p}_{[0]}(u^1, u^2)
&= \begin{pmatrix}
u^1 \\
u^2 \\
(u^1)^2 + (u^2)^2
\end{pmatrix} \\
D
&= [-1,1]\times[-1,1]
\end{aligned}
```
!!! info "Direction of the surface"
In the next step, we will split the surface into elongated strips.
The domain of each strip should be rectangular, with the longer direction along `u¹` and the shorter direction along `u²`.
The paraboloid has four-fold symmetry, so we don't need to worry about the orientation here.
### Split the surface into strips
The domain ``D`` will be split into ``D_i``.
```math
\begin{aligned}
D_i
&= [-1,1]\times\left[\frac{i-1}{10},\frac{i}{10}\right] & (i=1,\dots,10)
\end{aligned}
```

In julia script, just define a domain of the strip with function `D(i,n)`.
```@example paraboloid
n = 10
D(i,n) = (-1.0..1.0, (i-1)/n..i/n)
```
### Check the strain prediction
Before computing the embedding numerically, we can predict the strain with the *Strain Approximation Formula*:
```math
\begin{aligned}
E_{11}^{\langle 0\rangle}&\approx\frac{1}{2}K_{[0]}B^2\left(r^2-\frac{1}{3}\right)
\end{aligned}
```
You can check this strain estimation using the [`show_strain`](@ref) function.
```@example paraboloid
for i in 1:n
show_strain(D(i,n))
end
```
!!! tip "Allowable strain"
A positive number means tension, and a negative number means compression.
Empirically, the result is better if the absolute value of the strain is smaller than ``0.01 (=1\%)``.
### Initial state
Once you have checked the strain prediction, the next step is to determine the initial state with [`initial_state`](@ref) (or [`initial_state!`](@ref) from the second strip onward).
From this point on, the computation is done separately for each strip of the surface.
First, let's calculate for ``i=1``.
```@example paraboloid
i = 1
```
As a first step, let's compute the initial state.
```@example paraboloid
steptree = initial_state(D(i,n))
```
### Newton-Raphson method iteration
The [`newton_onestep!`](@ref) function computes one iteration of the Newton-Raphson method.
```@example paraboloid
newton_onestep!(steptree, fixingmethod=:fix3points)
newton_onestep!(steptree)
```
You can choose the fixing method from below:
* `:default` (default)
* `:fix3points`
### Refinement of B-spline manifold
```@example paraboloid
refinement!(steptree, p₊=(0,1), k₊=(EmptyKnotVector(),KnotVector([(i-1/2)/10])))
```
The knotvector to be inserted in [`refinement!`](@ref) can be suggested by the [`show_knotvector`](@ref) function.
### Pin the step
Once you have finished computing for the strip, it's time to *pin* the step.
The [`pin!`](@ref) function marks the step for use in the final export.
```@example paraboloid
pin!(steptree)
```
If you added a pin by mistake, you can remove it with the [`unpin!`](@ref) function.
```@example paraboloid
unpin!(steptree, 4)
```
### Compute more
```@example paraboloid
newton_onestep!(steptree)
newton_onestep!(steptree)
pin!(steptree)
i = 2
initial_state!(steptree, D(i,n))
newton_onestep!(steptree, fixingmethod=:fix3points)
newton_onestep!(steptree)
refinement!(steptree, p₊=(0,1), k₊=(EmptyKnotVector(),KnotVector([(i-1/2)/10])))
newton_onestep!(steptree)
newton_onestep!(steptree)
pin!(steptree)
```
### Export all pinned shapes
This is the final step of the computational process with [`export_pinned_steps`](@ref).
```@example paraboloid
export_pinned_steps(".", steptree, unitlength=(50, "mm"), mesh=(20,1), xlims=(-2,2), ylims=(-0.3,0.3))
```
This will create SVG files in `./pinned`.
`pinned/pinned-6.svg`

`pinned/pinned-12.svg`

All outputs for `i in 1:10` will look like this:

You can edit these files and craft them into the curved surface shape.
## Other examples
More examples can be found in the [gallery](@ref gallery).
| ElasticSurfaceEmbedding | https://github.com/hyrodium/ElasticSurfaceEmbedding.jl.git |
|
[
"MIT"
] | 0.1.0 | 5c9e72c191fadb9ac84137a4a02806a73e63d2c7 | docs | 2170 | # [Symbolic computation](@id symbolic_computation)
There are two `.ipynb` files in [ElasticSurfaceEmbedding-wolfram](https://github.com/hyrodium/ElasticSurfaceEmbedding-wolfram) repository.
* `MainTheorem.ipynb`
* `AnalyticSolution.ipynb`
These files are used for some symbolic computation, so you can skip this part if you just want to make a surface model.
The following sections describe the role of these files.
Please install the following software in your environment.
* [Wolfram Engine](https://www.wolfram.com/engine/)
* [Wolfram Language kernel for Jupyter notebooks](https://github.com/WolframResearch/WolframLanguageForJupyter)
## Main theorems
There are two theorems in [our paper](https://arxiv.org/abs/2211.06372).
!!! info "Theorem. Approximation of Strain"
    For a sufficiently small breadth ``B`` of the curved piece, the piece is at each point in an approximately ``u^1``-directional uniaxial stress state, and the principal strain can be approximated as
```math
E^{\langle 0 \rangle}_{11} \approx \frac{1}{2}K_{[0]}B^2\left(r^2-\frac{1}{3}\right), \quad E^{\langle 0 \rangle}_{22} \approx -\nu E^{\langle 0 \rangle}_{11}
```
    where ``K_{[0]}`` is the Gaussian curvature along the center curve ``C_{[0]}`` of the reference state ``M_{[0]}``, and ``r`` is a normalized breadth-directional coordinate (``-1 \le r \le 1``).

!!! info "Theorem. Approximation of Embedding"
    Let ``C_{[0]}`` be the center curve of ``M_{[0]}``, ``\kappa_{[0]}`` its geodesic curvature, and ``B`` the breadth from the center curve of ``M_{[0]}``. Similarly, let ``C_{[t]}`` be the center curve of ``M_{[t]}``, and ``\kappa_{[t]}`` its planar curvature. If the breadth ``B`` is sufficiently small, then the following approximation is satisfied.
```math
g_{[t]}|_C \approx g_{[0]}|_C, \quad \kappa_{[t]} \approx \kappa_{[0]}.
```

The proof is too hard to carry out by hand, so we wrote code to solve the problem in the Wolfram language.
## Analytic solutions
There are analytic solutions for some specific problems.
Please try `AnalyticSolution.ipynb` for more information.
| ElasticSurfaceEmbedding | https://github.com/hyrodium/ElasticSurfaceEmbedding.jl.git |
|
[
"MIT"
] | 2.0.0 | a6a59e578963b25c977a4c59d7fca12ab905f546 | code | 236 | module Stardates
import Dates
import Printf
using TimeZones
include("constants.jl")
include("startofyear.jl")
include("sd.jl")
export Stardate, stardate, defaulttimezone, mediawiki, mstardate,
nyse_closing_stardate
end # module
| Stardates | https://github.com/chrisoei/Stardates.jl.git |
|
[
"MIT"
] | 2.0.0 | a6a59e578963b25c977a4c59d7fca12ab905f546 | code | 243 | const millisecond = 1.0 / 31556952000.0
const second = 1.0 / 31556952.0
const minute = 1.0 / 525949.2
const hour = 1.0 / 8765.82
const day = 1.0 / 365.2425
const week = 7.0 / 365.2425
const fortnight = 14.0 / 365.2425
const month = 1.0 / 12.0
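# Each constant above expresses its time unit as a fraction of a mean Gregorian year
# (365.2425 days = 31,556,952 seconds).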
| Stardates | https://github.com/chrisoei/Stardates.jl.git |
|
[
"MIT"
] | 2.0.0 | a6a59e578963b25c977a4c59d7fca12ab905f546 | code | 2239 | struct Stardate
sd::Float64
canonical::String
short::String
originaltz::TimeZones.TimeZone
end
"""
defaulttimezone()
Returns a TimeZone object corresponding to the
IPFS file `/t/env/TZ`.
"""
function defaulttimezone()
tzstring = read(`ipfs files read /t/env/TZ`, String)
TimeZones.TimeZone(tzstring)
end
function Stardate(x::Float64, tz1::TimeZone = tz"UTC")
Stardate(
x,
Printf.@sprintf("%0.15f", x),
Printf.@sprintf("%0.3f", x),
tz1
)
end
function Stardate(zdt::ZonedDateTime)
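    # Express the instant as a fractional year: convert to UTC, then interpolate
    # linearly between the starts of years y0 and y0 + 1.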
tx = astimezone(zdt, tz"UTC")
y0 = year(tx)
t0 = Stardates.getstartofyear(y0).zoneddatetime
t1 = Stardates.getstartofyear(y0 + 1).zoneddatetime
Stardate(y0 + (tx - t0) / (t1 - t0), timezone(zdt))
end
function Stardate(dt::Dates.DateTime)
# Assume dt is in UTC
y0 = Dates.year(dt)
t0 = Stardates.getstartofyear(y0).unix
t1 = Stardates.getstartofyear(y0 + 1).unix
Stardate(y0 + (Dates.datetime2unix(dt) - t0) / (t1 - t0))
end
function Stardate(d::Dates.Date, hr, mi, ss, tz)
Stardate(ZonedDateTime(
Dates.year(d),
Dates.month(d),
Dates.day(d),
hr,
mi,
ss,
tz,
))
end
function Stardate(;
year,
month,
day,
hour = 12,
minute = 0,
second = 0,
tz = nothing,
style = nothing,
copy = false
)
    if isnothing(tz)
        tz1 = defaulttimezone()
    else
        tz1 = TimeZone(tz)
    end
Stardate(ZonedDateTime(year, month, day, hour, minute, second, tz1))
end
function stardate(args...)
Stardate(args...).sd
end
"Return the stardate of the closing bell of the date"
function nyse_closing_stardate(d::Dates.Date)
stardate(d, 16, 0, 0, tz"America/New_York")
end
"Return the stardate of the closing bell of the date represented by iso8601"
function nyse_closing_stardate(iso8601)
nyse_closing_stardate(parse(Dates.Date, iso8601))
end
function Stardate(st::Base.StatStruct)
Stardate(Dates.unix2datetime(st.mtime))
end
function mstardate(fn::AbstractString)
Stardate(stat(fn))
end
function Stardate(style::Symbol)
@assert(style == :now)
Stardate(now(defaulttimezone()))
end
function mediawiki(x::Stardate)
"<stardate digits=\"3\" tz=\"" *
TimeZones.name(x.originaltz) * "\">" *
x.canonical *
"</stardate>"
end
| Stardates | https://github.com/chrisoei/Stardates.jl.git |
|
[
"MIT"
] | 2.0.0 | a6a59e578963b25c977a4c59d7fca12ab905f546 | code | 291 | startofyear = Dict{Int64,Any}()
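# Cache of start-of-year instants, keyed by year. Each entry is a named tuple
# holding the DateTime, its Unix timestamp, and the matching UTC ZonedDateTime.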
function getstartofyear(y)
if haskey(startofyear, y)
return startofyear[y]
end
dt = Dates.DateTime(y, 1, 1, 0, 0, 0)
startofyear[y] = (
datetime = dt,
unix = Dates.datetime2unix(dt),
zoneddatetime = ZonedDateTime(dt, tz"UTC"),
)
end
| Stardates | https://github.com/chrisoei/Stardates.jl.git |
|
[
"MIT"
] | 2.0.0 | a6a59e578963b25c977a4c59d7fca12ab905f546 | code | 1829 | using Stardates
using Test
using TimeZones
@testset "Stardates.jl" begin
@test Stardates.millisecond > 0.0
@test length(Stardate(:now).canonical) == 20
@test length(Stardate(:now).short) == 8
@test abs(Stardate(ZonedDateTime(
2019,
8,
26,
19,
33,
42,
tz"America/Los_Angeles",
)).sd - 2019.652347222222261) < 3.0 * Stardates.second
@test abs(Stardate(ZonedDateTime(2012, 1, 2, 3, 4, 5, tz"UTC")).sd -
2012.0030815181644) < 3.0 * Stardates.second
@test abs(Stardate(ZonedDateTime(
2014,
9,
30,
17,
17,
27,
tz"America/Los_Angeles",
)).sd - 2014.747978420491791) < 3.0 * Stardates.second
@test abs(nyse_closing_stardate("2014-09-30") -
2014.747488584474922) < 3.0 * Stardates.second
@test abs(Stardate(DateTime(2012, 1, 2, 3, 4, 5)).sd -
2012.0030815181644) < 3.0 * Stardates.second
sd1 = Stardate(:now)
sleep(1.0)
f1 = tempname()
write(f1, "Hello")
sd2 = mstardate(f1)
rm(f1)
sleep(1.0)
sd3 = Stardate(:now)
@test sd1.sd <= sd2.sd
@test sd2.sd <= sd3.sd
# Test against Python implementation output
@test stardate(ZonedDateTime(
2019,
10,
11,
11,
7,
0,
tz"America/Los_Angeles",
)) ≈ 2019.7774105783867
@test Stardate(
year = 2063,
month = 4,
day = 5,
tz = "UTC-6"
).sd ≈ 2063.2595890410958
end
| Stardates | https://github.com/chrisoei/Stardates.jl.git |
|
[
"MIT"
] | 2.0.0 | a6a59e578963b25c977a4c59d7fca12ab905f546 | docs | 453 | # Stardates
```
julia> using Stardates
julia> Stardate(:now)
Stardate(2021.800063863648, "2021.800063863647893", "2021.800")
julia> Stardate(year = 2010, month = 5, day = 6, hour = 14, minute = 45,
tz = "America/New_York")
Stardate(2010.3446061643835, "2010.344606164383549", "2010.345")
julia> mstardate(".")
Stardate(2021.800059999239, "2021.800059999239011", "2021.800")
julia> nyse_closing_stardate("2010-05-06")
2010.3447488584475
```
| Stardates | https://github.com/chrisoei/Stardates.jl.git |
|
[
"MIT"
] | 0.2.2 | 6a18107b6fc89bb32eb5dcec609a7355b53e8b78 | code | 725 | using Documenter, ImagePhaseCongruency
using TestImages
using DemoCards
testimage("cameraman") # used to trigger artifact downloading
# generate
demopage, postprocess_cb, demo_assets = makedemos("examples") # this is the relative path to docs/
assets = []
isnothing(demo_assets) || (push!(assets, demo_assets))
format = Documenter.HTML(edit_link = "master",
prettyurls = get(ENV, "CI", nothing) == "true",
assets = assets)
makedocs(
format=format,
sitename = "ImagePhaseCongruency",
pages = [
"index.md",
demopage,
"functions.md"
]
)
postprocess_cb()
deploydocs(
repo = "github.com/peterkovesi/ImagePhaseCongruency.jl.git",
)
| ImagePhaseCongruency | https://github.com/peterkovesi/ImagePhaseCongruency.jl.git |
|
[
"MIT"
] | 0.2.2 | 6a18107b6fc89bb32eb5dcec609a7355b53e8b78 | code | 1921 | # ---
# title: Fourier transform of Moisan periodic image component
# id: demo_perfft2
# cover: assets/perfft2.png
# author: Peter Kovesi
# date: 2018-10-26
# ---
# The function `perfft2()` implements Moisan's "Periodic plus Smooth Image
# Decomposition" which decomposes an image into two components
#
# img = p + s
#
# where `s` is the 'smooth' component with mean 0 and `p` is the 'periodic' component
# which has no sharp discontinuities when one moves cyclically across the image
# boundaries.
#
# This decomposition is very useful when one wants to obtain an FFT of an image
# with minimal artifacts introduced from the boundary discontinuities. The image
# `p` gathers most of the image information but avoids periodization artifacts.
#
# Reference:
# L. Moisan, "Periodic plus Smooth Image Decomposition", Journal of
# Mathematical Imaging and Vision, vol 39:2, pp. 161-179, 2011.
using Images
using FFTW
using ImagePhaseCongruency
using ImageContrastAdjustment
using TestImages
img = Float64.(Gray.(testimage("lena")))
IMG = fft(img) # 'Standard' fft
(P, S, p, s) = perfft2(img) # 'Periodic' fft
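## A quick sanity check (an addition to the original demo): the two components
## should sum back to the image, and the smooth component should have zero mean.
maximum(abs.(img .- (p .+ s))), abs(sum(s))  # both values ≈ 0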
mosaic(
adjust_histogram(Gray.(p), LinearStretching()),
adjust_histogram(s, LinearStretching()),
## Note the vertical and horizontal cross in
## the spectrum induced by the non-periodic edges.
adjust_histogram(log.(abs.(fftshift(IMG)) .+ 1), LinearStretching()),
## Note the clean spectrum because p is periodic.
adjust_histogram(log.(abs.(fftshift(P)) .+ 1), LinearStretching());
nrow=2, rowmajor=true
)
# Top 1) left: periodic component 2) right: smooth component
#
# Bottom 3) left: spectrum of standard FFT 4) right: spectrum of periodic component
# save cover image #src
isdir("assets") || mkdir("assets") #src
cover = Gray.(adjust_histogram(log.(abs.(fftshift(P)) .+ 1), LinearStretching())) #src
save(joinpath("assets", "perfft2.png"), cover) #src
| ImagePhaseCongruency | https://github.com/peterkovesi/ImagePhaseCongruency.jl.git |
|
[
"MIT"
] | 0.2.2 | 6a18107b6fc89bb32eb5dcec609a7355b53e8b78 | code | 876 | # ---
# title: Log-Gabor filters v3
# id: demo_phasecong3
# cover: assets/phasecong3.png
# author: Peter Kovesi
# date: 2018-10-26
# ---
# Use of the function `phasecong3()` allows corner points to be detected as well. These
# corner points are a subset of the edge image and, unlike other corner detectors, their
# location is precise and stable over different scales.
using TestImages
using Images
using ImagePhaseCongruency
img = restrict(testimage("mandril_gray"))
(edges, corners) = phasecong3(img)
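## The call above uses the default parameters. `phasecong3` also accepts keyword
## arguments controlling, for example, the number of filter scales and
## orientations analysed; see its docstring for the full list.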
mosaic(
img,
adjust_histogram(Gray.(edges), LinearStretching()),
adjust_histogram(corners, LinearStretching()),
nrow=1
)
# Images from top to right: 1) original image 2) edges 3) corners
# save cover image #src
isdir("assets") || mkdir("assets") #src
save(joinpath("assets", "phasecong3.png"), adjust_histogram(Gray.(edges), LinearStretching())) #src
| ImagePhaseCongruency | https://github.com/peterkovesi/ImagePhaseCongruency.jl.git |
|
[
"MIT"
] | 0.2.2 | 6a18107b6fc89bb32eb5dcec609a7355b53e8b78 | code | 967 | # ---
# title: Monogenic filters
# id: demo_phasecongmono
# cover: assets/phasecongmono.png
# author: Peter Kovesi
# date: 2018-10-26
# ---
# Phase congruency marks all classes of features from steps to lines and is a dimensionless
# quantity that ranges from 0 to 1. This allows fixed thresholds to be used over wide
# classes of images.
using TestImages
using Images
using ImagePhaseCongruency
img = restrict(testimage("mandril_gray"))
(pc, or, ft, T) = phasecongmono(img)
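## Here `pc` is the phase congruency image and `or` the feature orientation,
## which is used below for non-maximal suppression.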
nonmax = Images.thin_edges(pc, or)
mosaic(
img,
adjust_histogram(pc, LinearStretching()),
nonmax,
hysthresh(nonmax, 0.1, 0.2);
nrow=2, rowmajor=true
)
# Images: 1) top left: original image 2) top right: phase congruency 3) bottom left:
# non-maximal suppression 4) bottom right: hysteresis thresholded
# save cover image #src
isdir("assets") || mkdir("assets") #src
save(joinpath("assets", "phasecongmono.png"), adjust_histogram(Gray.(pc), LinearStretching())) #src
| ImagePhaseCongruency | https://github.com/peterkovesi/ImagePhaseCongruency.jl.git |
|
[
"MIT"
] | 0.2.2 | 6a18107b6fc89bb32eb5dcec609a7355b53e8b78 | code | 1095 | # ---
# title: Symmetric monogenic filters
# id: demo_phasesymmono
# cover: assets/phasesymmono.gif
# author: Peter Kovesi
# date: 2018-10-26
# ---
# Phase symmetry responds well to line like features and circular objects. The number of
# filter scales will affect the scale of features that are marked. Phase symmetry marks
# features independently of contrast (a bright circle is not more symmetric than a grey
# circle) and is a dimensionless quantity between 0 and 1. However, this may not be what one
# desires, in which case the symmetry energy may be of greater interest.
using TestImages
using Images
using ImagePhaseCongruency
img = Gray.(testimage("blobs"))
## Detect regions of bright symmetry (polarity = 1)
phase_bright, = phasesymmono(img; nscale=5, polarity=1)
## Detect regions of dark symmetry (polarity = -1)
phase_dark, = phasesymmono(img; nscale=5, polarity=-1)
mosaic(img, phase_bright, phase_dark; nrow=1)
# save cover image #src
isdir("assets") || mkdir("assets") #src
save(joinpath("assets", "phasesymmono.gif"), Images.gif([phase_bright, phase_dark]); fps=1) #src
| ImagePhaseCongruency | https://github.com/peterkovesi/ImagePhaseCongruency.jl.git |
|
[
"MIT"
] | 0.2.2 | 6a18107b6fc89bb32eb5dcec609a7355b53e8b78 | code | 875 | # ---
# title: Denoise
# id: demo_ppdenoise
# cover: assets/ppdenoise.png
# author: Peter Kovesi
# date: 2018-10-26
# ---
using TestImages
using Images
using ImageContrastAdjustment
using ImagePhaseCongruency
using Random #hide
Random.seed!(1234) #hide
## Values in the range 0 to 1
img = centered(Gray.(restrict(testimage("lighthouse"))))[-127:128, -127:128]
## Add noise with standard deviation of 0.25
img .+= 0.25 * randn(size(img))
cleanimg = ppdenoise(img; nscale=6, norient=6, mult=2.5, minwavelength=2, sigmaonf=0.55, dthetaonsigma=1.0, k=3, softness=1.0)
mosaic(
adjust_histogram(img, LinearStretching()),
adjust_histogram(cleanimg, LinearStretching());
nrow=1
)
# save cover image #src
isdir("assets") || mkdir("assets") #src
cover = adjust_histogram(Gray.(cleanimg), LinearStretching()) #src
save(joinpath("assets", "ppdenoise.png"), cover) #src
| ImagePhaseCongruency | https://github.com/peterkovesi/ImagePhaseCongruency.jl.git |
|
[
"MIT"
] | 0.2.2 | 6a18107b6fc89bb32eb5dcec609a7355b53e8b78 | code | 1480 | # ---
# title: Dynamic Range Compression
# id: demo_ppdrc
# cover: assets/ppdrc.png
# author: Peter Kovesi
# date: 2018-10-26
# ---
# An example using the 16 bit M51 image. Phase preserving dynamic range compression allows
# the scale of analysis to be controlled. Here we process the image at wavelengths up to 50
# pixels and up to 200 pixels. Longer wavelengths allow larger structures to be seen. Small
# wavelengths allow fine structures to be seen. Note the image size is (510, 320).
using TestImages
using Images
using ImageContrastAdjustment
using ImagePhaseCongruency
img = float64.(testimage("m51"))
## Histogram equalization for reference (with a very large number of bins!)
img_histeq = histeq(img, 100_000)
## Phase presserving dynamic range compression at cutoff wavelengths of 50 and
## 200 pixels. Note we scale the image because its raw values are between 0 and
## 1, see the help information for ppdrc() for details.
scale = 1e4
img_ppdrc1 = ppdrc(img*scale, 50)
img_ppdrc2 = ppdrc(img*scale, 200)
mosaic(
adjust_histogram(img, LinearStretching()),
adjust_histogram(img_histeq, LinearStretching()),
adjust_histogram(img_ppdrc1, LinearStretching()),
adjust_histogram(img_ppdrc2, LinearStretching()),
nrow=1
)
# save cover image #src
isdir("assets") || mkdir("assets") #src
cropped_cover = adjust_histogram(centered(img_ppdrc1)[-128:127, -128:127], LinearStretching()) #src
save(joinpath("assets", "ppdrc.png"), cropped_cover) #src
| ImagePhaseCongruency | https://github.com/peterkovesi/ImagePhaseCongruency.jl.git |
|
[
"MIT"
] | 0.2.2 | 6a18107b6fc89bb32eb5dcec609a7355b53e8b78 | code | 801 | # ---
# title: Phase Quantization
# id: demo_quantizephase
# cover: assets/quantizephase.gif
# author: Peter Kovesi
# date: 2018-10-26
# ---
# Phase values in an image are important. However, despite this, phase can be quantized
# very heavily with little perceptual loss. It can be quantized to as few as four levels, or
# even three. Quantizing to two levels still gives an image that can be interpreted.
using TestImages
using Images
using ImagePhaseCongruency
img = Float64.(restrict(testimage("mandril_gray")))
results = map((8, 4, 3, 2)) do nlevels
out = quantizephase(img, nlevels)
clamp01!(Gray.(out))
end
mosaic(results; nrow=1)
# save cover image #src
isdir("assets") || mkdir("assets") #src
save(joinpath("assets", "quantizephase.gif"), Images.gif([results...]); fps=1) #src
| ImagePhaseCongruency | https://github.com/peterkovesi/ImagePhaseCongruency.jl.git |
|
[
"MIT"
] | 0.2.2 | 6a18107b6fc89bb32eb5dcec609a7355b53e8b78 | code | 1303 | # ---
# title: Amplitude swapping
# id: demo_swapphase
# cover: assets/swapphase.gif
# author: Peter Kovesi
# date: 2018-10-26
# ---
# A demonstration of the importance of phase information in images. Given two
# images, `swapphase()` takes their Fourier transforms and constructs two new synthetic
# images formed from the swapped phase and amplitude information. In general it is the
# phase information that dominates. However, for textures where the amplitude spectra can
# be concentrated in a limited set of locations, the reverse can apply.
# See [Oppenheim and Lim's paper "The importance of phase in signals". Proceedings of the
# IEEE. Volume: 69 , Issue: 5 , May 1981](https://ieeexplore.ieee.org/document/1456290)
using TestImages
using Images
using ImagePhaseCongruency
img1 = centered(Float64.(Gray.(restrict(testimage("lighthouse")))))[-127:128, -127:128]
img2 = restrict(Float64.(testimage("mandril_gray")))[1:256, 1:256]
(newimg1, newimg2) = swapphase(img1, img2)
mosaic(Gray.(img1), newimg1, img2, newimg2; nrow=2)
# Bottom 1) left: phase of lighthouse, amplitude of Mandrill 2) right: amplitude of lighthouse, phase of Mandrill
# save cover image #src
isdir("assets") || mkdir("assets") #src
save(joinpath("assets", "swapphase.gif"), Images.gif([img1, newimg1]); fps=1) #src
| ImagePhaseCongruency | https://github.com/peterkovesi/ImagePhaseCongruency.jl.git |
|
[
"MIT"
] | 0.2.2 | 6a18107b6fc89bb32eb5dcec609a7355b53e8b78 | code | 521 | # ---
# title: circsine
# id: demo_circsine
# cover: assets/circsine.png
# author: Peter Kovesi
# date: 2018-10-26
# ---
using Images
using ImagePhaseCongruency
## Circular features at a phase congruent angle of pi/4 and
## an amplitude decay exponent of 1.5
img = circsine(offset = pi/4, ampexponent = -1.5)
adjust_histogram(Gray.(img), LinearStretching())
# save cover image #src
isdir("assets") || mkdir("assets") #src
save(joinpath("assets", "circsine.png"), adjust_histogram(Gray.(img), LinearStretching())) #src
| ImagePhaseCongruency | https://github.com/peterkovesi/ImagePhaseCongruency.jl.git |
|
[
"MIT"
] | 0.2.2 | 6a18107b6fc89bb32eb5dcec609a7355b53e8b78 | code | 594 | # ---
# title: noiseonf
# id: demo_noiseonf
# cover: assets/noiseonf.png
# author: Peter Kovesi
# date: 2018-10-26
# ---
using Images
using ImageContrastAdjustment
using ImagePhaseCongruency
## Noise images with amplitude decay exponents of 1.5 and 2.5
img1 = noiseonf(512, 1.5)
img2 = noiseonf(512, 2.5)
mosaic(
adjust_histogram(Gray.(img1), LinearStretching()),
adjust_histogram(img2, LinearStretching());
nrow=1
)
# save cover image #src
isdir("assets") || mkdir("assets") #src
save(joinpath("assets", "noiseonf.png"), adjust_histogram(Gray.(img1), LinearStretching())) #src
| ImagePhaseCongruency | https://github.com/peterkovesi/ImagePhaseCongruency.jl.git |
|
[
"MIT"
] | 0.2.2 | 6a18107b6fc89bb32eb5dcec609a7355b53e8b78 | code | 517 | # ---
# title: starsine
# id: demo_starsine
# cover: assets/starsine.png
# author: Peter Kovesi
# date: 2018-10-26
# ---
using Images
using ImagePhaseCongruency
## Star pattern with features at a phase congruent angle of pi/4 and
## an amplitude decay exponent of 2
img = starsine(offset = pi/4, ampexponent = -2)
adjust_histogram(Gray.(img), LinearStretching())
# save cover image #src
isdir("assets") || mkdir("assets") #src
save(joinpath("assets", "starsine.png"), adjust_histogram(Gray.(img), LinearStretching())) #src
| ImagePhaseCongruency | https://github.com/peterkovesi/ImagePhaseCongruency.jl.git |
|
[
"MIT"
] | 0.2.2 | 6a18107b6fc89bb32eb5dcec609a7355b53e8b78 | code | 1739 | # ---
# title: step2line
# id: demo_step2line
# cover: assets/step2line.png
# author: Peter Kovesi
# date: 2018-10-26
# ---
# The `step2line()` function generates a phase congruent test image where angle at which the
# congruency occurs is interpolated from 0 at the top of the image to pi/2 at the bottom.
# This produces an interpolation of feature type from step edge to line. The point being
# that phase congruency at any angle produces a feature and the angle at which the
# congruency occurs defines the feature type. Gradient based edge detectors will only
# correctly mark the step-like feature towards the top of the image and incorrectly mark two
# features towards the bottom of the image whereas phase congruency will correctly mark a
# single feature from top to bottom. In general, natural images contain a roughly uniform
# distribution of the full continuum of feature types from step to line.
using Images
using ImagePhaseCongruency
img1 = step2line(ampexponent=-1)
## note the softer features
img2 = step2line(ampexponent=-1.5)
## Compute phase congruency on the `step2line` image using default parameters
(pc,) = phasecongmono(step2line(ampexponent = -1))
fimg = imfilter(step2line(ampexponent = -1), KernelFactors.gaussian((2, 2)))
(gx, gy) = imgradients(fimg, KernelFactors.ando3)
∇img = sqrt.(gx.^2 + gy.^2)
mosaicview(
adjust_histogram(Gray.(img1), LinearStretching()),
adjust_histogram(img2, LinearStretching()),
adjust_histogram(pc, LinearStretching()),
adjust_histogram(∇img, LinearStretching()),
nrow=2, rowmajor=true
)
# save cover image #src
isdir("assets") || mkdir("assets") #src
save(joinpath("assets", "step2line.png"), adjust_histogram(Gray.(img1), LinearStretching())) #src
| ImagePhaseCongruency | https://github.com/peterkovesi/ImagePhaseCongruency.jl.git |
|
[
"MIT"
] | 0.2.2 | 6a18107b6fc89bb32eb5dcec609a7355b53e8b78 | code | 404 | using ImagePhaseCongruency, Images, PyPlot
img = circsine(offset = pi/4, ampexponent = -1.5);
imshow(img)
fimg = imfilter(img, KernelFactors.gaussian((2, 2)))
(grad_y, grad_x) = imgradients(fimg, KernelFactors.ando3)
imshow(grad_y)
orient = orientation(grad_x, grad_y)
mag = magnitude(grad_x, grad_y)
thinned, subpix = thin_edges_nonmaxsup(mag, orient, radius=1.35, theta=pi/180)
imshow(thinned)
| ImagePhaseCongruency | https://github.com/peterkovesi/ImagePhaseCongruency.jl.git |
|
[
"MIT"
] | 0.2.2 | 6a18107b6fc89bb32eb5dcec609a7355b53e8b78 | code | 1738 | #=----------------------------------------------------------------------------
Image Phase Congruency
Phase based feature detection and image enhancement.
Copyright (c) 2015-2018 Peter Kovesi
peterkovesi.com
MIT License:
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
PK June 2015 Original porting from MATLAB to Julia
November 2017 Julia 0.6
October 2018 Julia 0.7/1.0
----------------------------------------------------------------------------=#
"""
**Image Phase Congruency**
Phase based feature detection and image enhancement
Peter Kovesi
[peterkovesi.com](http://peterkovesi.com)
"""
module ImagePhaseCongruency
include("phasecongruency.jl")
include("frequencyfilt.jl")
include("syntheticimages.jl")
include("utilities.jl")
end
| ImagePhaseCongruency | https://github.com/peterkovesi/ImagePhaseCongruency.jl.git |
|
[
"MIT"
] | 0.2.2 | 6a18107b6fc89bb32eb5dcec609a7355b53e8b78 | code | 26255 | #=--------------------------------------------------------------------
frequencyfilt - Functions for constructing image filters in the
frequency domain.
Copyright (c) Peter Kovesi
peterkovesi.com
MIT License:
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
PK August 2015 Original porting from MATLAB to Julia
October 2017 Updates for 0.6
October 2018 Julia 0.7/1.0
---------------------------------------------------------------------=#
export filtergrid, filtergrids, gridangles
export cosineangularfilter, gaussianangularfilter
export lowpassfilter, highpassfilter, bandpassfilter, highboostfilter
export loggabor, monogenicfilters, packedmonogenicfilters
export perfft2
export geoseries
# export homomorphic
#--------------------------------------------------------------------
# filtergrids
"""
Generate grids for constructing frequency domain filters.
```
Usage: (f, fx, fy) = filtergrids(rows, cols)
(f, fx, fy) = filtergrids((rows, cols))
Arguments: rows, cols - Size of image/filter
Returns: f - Grid of size (rows, cols) containing frequency
values from 0 to 0.5, where f =
sqrt(fx^2 + fy^2). The grid is quadrant
shifted so that 0 frequency is at f[1,1]
fx, fy - Grids containing normalised frequency values
ranging from -0.5 to 0.5 in x and y directions
respectively. fx and fy are quadrant shifted.
```
See also: [`filtergrid`](@ref) where you are only needing radius
"""
function filtergrids(rows::Integer, cols::Integer)
# Set up X and Y spatial frequency matrices, fx and fy, with ranges
# normalised to +/- 0.5 The following code adjusts things appropriately for
# odd and even values of rows and columns so that the 0 frequency point is
# placed appropriately.
if isodd(cols)
fxrange = (-(cols-1)/2:(cols-1)/2)/cols
else
fxrange = (-cols/2:(cols/2-1))/cols
end
if isodd(rows)
fyrange = (-(rows-1)/2:(rows-1)/2)/rows
else
fyrange = (-rows/2:(rows/2-1))/rows
end
fx = [c for r = fyrange, c = fxrange]
fy = [r for r = fyrange, c = fxrange]
# Quadrant shift so that filters are constructed with 0 frequency at
# the corners
fx = ifftshift(fx)
fy = ifftshift(fy)
# Construct spatial frequency values in terms of normalised radius from
# centre.
f = sqrt.(fx.^2 .+ fy.^2)
return f, fx, fy
end
# Tuple version
function filtergrids(sze::Tuple{Integer,Integer})
return filtergrids(sze[1], sze[2])
end
#--------------------------------------------------------------------
# filtergrid
"""
Generate grid for constructing frequency domain filters.
```
Usage: f = filtergrid(rows, cols)
f = filtergrid((rows, cols))
Arguments: rows, cols - Size of image/filter
Returns: f - Grid of size (rows, cols) containing normalised
frequency values from 0 to 0.5. Grid is quadrant
shifted so that 0 frequency is at f[1,1]
```
Used by [`phasecongmono`](@ref), [`phasecong3`](@ref), etc.
See also: [`filtergrids`](@ref) if you also want normalized frequency grids in
the x and y directions as well.
"""
function filtergrid(rows::Integer, cols::Integer)
# Set up X and Y spatial frequency ranges normalised to +/- 0.5
# The following code adjusts things appropriately for odd and even
# values of rows and columns so that the 0 frequency point is
# placed appropriately.
if isodd(cols)
fxrange = (-(cols-1)/2:(cols-1)/2)/cols
else
fxrange = (-cols/2:(cols/2-1))/cols
end
if isodd(rows)
fyrange = (-(rows-1)/2:(rows-1)/2)/rows
else
fyrange = (-rows/2:(rows/2-1))/rows
end
# Construct spatial frequency values in terms of normalised radius from
# centre.
f = [sqrt(fx^2 + fy^2) for fy in fyrange, fx in fxrange]
return ifftshift(f)
end
# Tuple version
function filtergrid(sze::Tuple{Integer,Integer})
return filtergrid(sze[1], sze[2])
end
#--------------------------------------------------------------------
# monogenicfilters
"""
Generate monogenic filter grids.
```
Usage: (H1, H2, f) = monogenicfilters(rows, cols)
(H1, H2, f) = monogenicfilters((rows, cols))
Arguments: rows,cols - Size of filters to generate
Returns: H1, H2 - The two monogenic filters.
f - Frequency grid corresponding to the filters.
where:
H1 = i*fx./f
H2 = i*fy./f
```
Note that H1, H2, and f are quadrant shifted so that the 0 frequency
value is at coordinate [1,1].
See also: [`packedmonogenicfilters`](@ref)
"""
function monogenicfilters(rows::Integer, cols::Integer)
(f, fx, fy) = filtergrids(rows, cols)
f[1,1] = 1 # Set DC value to 1 to avoid divide by zero
H1 = im.*fx./f
H2 = im.*fy./f
H1[1,1] = 0 # Restore 0 DC value
H2[1,1] = 0
f[1,1] = 0
return H1, H2, f
end
# Tuple version
function monogenicfilters(sze::Tuple{Integer,Integer})
return monogenicfilters(sze[1], sze[2])
end
#--------------------------------------------------------------------
# packedmonogenicfilters
"""
Monogenic filter where both filters are packed in the one Complex grid.
```
Usage: (H, f) = packedmonogenicfilters(rows, cols)
(H, f) = packedmonogenicfilters((rows, cols))
Arguments: rows,cols - Size of filters to generate
Returns: H - The two monogenic filters packed into the
one Complex64 grid.
f - Frequency grid corresponding to the filter.
```
The two monogenic filters are defined as
```
H1 = i*fx./f
H2 = i*fy./f
```
However the two filters can be packed together as a complex valued
matrix, one in the real part and one in the imaginary part. Do this
by multiplying H2 by i and then adding it to H1. When the convolution
is performed via the fft the real part of the result will correspond
to the convolution with H1 and the imaginary part with H2. This
allows the two convolutions to be done as one in the frequency domain,
saving time and memory.
Note that H and f are quadrant shifted so that the 0 frequency
value is at coordinate [1,1].
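A minimal usage sketch (assuming `img` is a 2D array of Real values):
```
(H, f) = packedmonogenicfilters(size(img))
h = ifft(fft(img).*H)
h1f = real.(h)    # convolution of img with H1
h2f = imag.(h)    # convolution of img with H2
```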
See also: [`monogenicfilters`](@ref)
"""
function packedmonogenicfilters(rows::Integer, cols::Integer)
(f, fx, fy) = filtergrids(rows, cols)
f[1,1] = 1 # Set DC value to 1 to avoid divide by zero
# Pack the two monogenic filters by multiplying H2 by i and then
# adding it to H1 (note the subtraction because i*i = -1).
H = (im.*fx .- fy)./f
H[1,1] = 0 # Restore 0 DC value
f[1,1] = 0
return H, f
end
# Tuple version
function packedmonogenicfilters(sze::Tuple{Integer,Integer})
return packedmonogenicfilters(sze[1], sze[2])
end
#--------------------------------------------------------------------
# lowpassfilter
"""
Construct a low-pass Butterworth filter.
```
Usage: f = lowpassfilter(sze, cutoff, n)
where: sze is a two element tuple specifying the size of filter
to construct (rows, cols).
cutoff is the cutoff frequency of the filter 0 - 0.5
n is the order of the filter, the higher n is the sharper
the transition is. (n must be an integer >= 1).
Note that n is doubled so that it is always an even integer.
      f = 1 / (1 + (w/cutoff)^(2n))
```
The frequency origin of the returned filter is at the corners.
See also: [`highpassfilter`](@ref), [`highboostfilter`](@ref), [`bandpassfilter`](@ref)
"""
function lowpassfilter(sze::Tuple{Integer, Integer}, cutoff::Real, n::Integer)
if cutoff < 0 || cutoff > 0.5
error("cutoff frequency must be between 0 and 0.5")
end
f = filtergrid(sze)
return 1.0 ./ (1.0 .+ (f ./ cutoff).^(2*n))
end
# Compute the low pass filter value at a specified normalised frequency
function lowpassfilter(f::Real, cutoff::Real, n::Integer)
return 1.0 / (1.0 + (f / cutoff)^(2*n))
end
#--------------------------------------------------------------------
# bandpassfilter
"""
Construct a band-pass Butterworth filter.
```
Usage: f = bandpassfilter(sze, cutin, cutoff, n)
Arguments:
sze - A 2 element tuple specifying the size of filter
to construct (rows, cols).
cutin, cutoff - The frequencies defining the band pass 0 - 0.5
n - The order of the filter, the higher n is the sharper
the transition is. (n must be an integer >= 1).
Returns:
f - Frequency domain filter of size==sze, the frequency
origin is at the corners.
```
See also: [`lowpassfilter`](@ref), [`highpassfilter`](@ref), [`highboostfilter`](@ref)
"""
function bandpassfilter(sze::Tuple{Integer, Integer}, cutin::Real, cutoff::Real, n::Integer)
if cutin < 0 || cutin > 0.5 || cutoff < 0 || cutoff > 0.5
error("Frequencies must be between 0 and 0.5")
end
if n < 1
error("Order of filter must be greater than 1")
end
return lowpassfilter(sze, cutoff, n) - lowpassfilter(sze, cutin, n)
end
#--------------------------------------------------------------------
# highboostfilter
"""
Construct a high-boost Butterworth filter.
```
Usage: f = highboostfilter(sze, cutoff, n, boost)
Arguments:
sze - A 2 element tuple specifying the size of filter
to construct (rows, cols).
cutoff - The cutoff frequency of the filter 0 - 0.5
n - The order of the filter, the higher n is the sharper
the transition is. (n must be an integer >= 1).
boost - The ratio that high frequency values are boosted
relative to the low frequency values. If boost is less
than one then a 'lowboost' filter is generated
Returns:
f - Frequency domain filter of size==sze, the frequency
origin is at the corners.
```
See also: [`lowpassfilter`](@ref), [`highpassfilter`](@ref), [`bandpassfilter`](@ref)
"""
function highboostfilter(sze::Tuple{Integer, Integer}, cutoff::Real, n::Integer, boost::Real)
if cutoff < 0 || cutoff > 0.5
error("cutoff frequency must be between 0 and 0.5")
end
if boost >= 1 # high-boost filter
f = (1.0 - 1.0/boost)*highpassfilter(sze, cutoff, n) .+ 1.0/boost
else # low-boost filter
f = (1.0 - boost)*lowpassfilter(sze, cutoff, n) .+ boost
end
return f
end
#--------------------------------------------------------------------
# highpassfilter
"""
Construct a high-pass Butterworth filter.
```
Usage: f = highpassfilter(sze, cutoff, n)
sze - A 2 element tuple specifying the size of filter
to construct (rows, cols).
cutoff - The cutoff frequency of the filter 0 - 0.5
n - The order of the filter, the higher n is the sharper
the transition is. (n must be an integer >= 1).
Returns:
f - Frequency domain filter of size==sze, the frequency
origin is at the corners.
```
See also: [`lowpassfilter`](@ref), [`highboostfilter`](@ref), [`bandpassfilter`](@ref)
"""
function highpassfilter(sze::Tuple{Integer, Integer}, cutoff::Real, n::Integer)
if cutoff < 0 || cutoff > 0.5
error("cutoff frequency must be between 0 and 0.5")
end
return 1.0 .- lowpassfilter(sze, cutoff, n)
end
#--------------------------------------------------------------------
# loggabor
"""
The logarithmic Gabor function in the frequency domain.
```
Usage: v = loggabor(f::Real, fo::Real, sigmaOnf::Real)
Arguments:
f - Frequency to evaluate the function at.
fo - Centre frequency of filter.
sigmaOnf - Ratio of the standard deviation of the Gaussian
describing the log Gabor filter's transfer function
in the frequency domain to the filter center frequency.
sigmaOnf = 0.75 gives a filter bandwidth of about 1 octave.
sigmaOnf = 0.55 gives a filter bandwidth of about 2 octaves.
```
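Example, evaluating a log-Gabor filter over a frequency grid (a sketch):
```
f = filtergrid(512, 512)
fltr = loggabor.(f, 1/40, 0.55)  # centre frequency 1/40, i.e. a wavelength of 40 pixels
```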
"""
function loggabor(f::Real, fo::Real, sigmaOnf::Real)
if f < eps()
return 0.0
else
return exp((-(log(f/fo))^2) / (2 * log(sigmaOnf)^2))
end
end
#-------------------------------------------------------------
# gridangles
"""
Generate arrays of filter grid angles.
```
Usage: (sintheta, costheta) = gridangles(freq, fx, fy)
Arguments: freq, fx, fy - The output of filtergrids()
Returns: sintheta - The sine and cosine of the angles in the filtergrid
costheta
```
See also [`filtergrids`](@ref)
"""
function gridangles(freq::AbstractArray{T1,2},
fx::AbstractArray{T2,2}, fy::AbstractArray{T3,2}) where {T1 <: Real, T2 <: Real, T3 <: Real}
freq[1,1] = 1 # Avoid divide by 0
sintheta = fx./freq # sine and cosine of filter grid angles
costheta = fy./freq
freq[1,1] = 0 # Restore 0 DC
return sintheta, costheta
end
#--------------------------------------------------------------------
# cosineangularfilter
"""
Orientation selective frequency domain filter with cosine windowing function.
```
Usage: filter = cosineangularfilter(angl, wavelen, sintheta, costheta)
Arguments:
angl - Orientation of the filter (radians)
wavelen - Wavelength of the angular cosine window function.
sintheta, costheta - Grids as returned by gridangles()
```
See also: [`gaussianangularfilter`](@ref), [`filtergrids`](@ref)
"""
function cosineangularfilter(angl::Real, wavelen::Real,
sintheta::Array{T1,2}, costheta::Array{T2,2}) where {T1 <: Real, T2 <: Real}
sinangl = sin(angl); cosangl = cos(angl)
fltr = zeros(size(sintheta))
# For each point in the filter matrix calculate the angular
# distance from the specified filter orientation. To overcome
# the angular wrap-around problem sine difference and cosine
# difference values are first computed and then the atan2
# function is used to determine angular distance.
for n in eachindex(sintheta)
ds = sintheta[n] * cosangl - costheta[n] * sinangl # Difference in sine.
dc = costheta[n] * cosangl + sintheta[n] * sinangl # Difference in cosine.
dtheta = abs(atan(ds, dc)) # Absolute angular distance.
# Scale theta so that cosine spread function has the right
# wavelength and clamp to pi. dtheta has a wavelength of
# 2pi. If desired wavelength of cosine window function is
# wavelen we need to multiply dtheta by 2*pi/wavelen.
dtheta = min(dtheta*2*pi/wavelen, pi)
# The spread function is cos(dtheta) between -pi and pi. We add 1,
# and then divide by 2 so that the value ranges 0-1
fltr[n] = (cos(dtheta)+1)/2
end
return fltr
end
#--------------------------------------------------------------------
# gaussianangularfilter
"""
Orientation selective frequency domain filter with Gaussian windowing function.
```
Usage: filter = gaussianangularfilter(angl, thetaSigma, sintheta, costheta)
Arguments:
angl - Orientation of the filter (radians)
thetasigma - Standard deviation of angular Gaussian window function.
sintheta, costheta - Grids as returned by gridangles()
```
See also: [`cosineangularfilter`](@ref), [`gridangles`](@ref), [`filtergrids`](@ref)
"""
function gaussianangularfilter(angl::Real, thetaSigma::Real,
sintheta::Array{T1,2}, costheta::Array{T2,2}) where {T1 <: Real, T2 <: Real}
sinangl = sin(angl); cosangl = cos(angl)
fltr = zeros(size(sintheta))
# For each point in the filter matrix calculate the angular
# distance from the specified filter orientation. To overcome
# the angular wrap-around problem sine difference and cosine
# difference values are first computed and then the atan2
# function is used to determine angular distance.
for n in eachindex(sintheta)
ds = sintheta[n] * cosangl - costheta[n] * sinangl # Difference in sine.
dc = costheta[n] * cosangl + sintheta[n] * sinangl # Difference in cosine.
dtheta = atan(ds, dc) # Angular distance.
fltr[n] = exp((-dtheta.^2) / (2 * thetaSigma^2))
end
return fltr
end
#--------------------------------------------------------------------
#=
"""
homomorphic - Performs homomorphic filtering on an image.
Function performs homomorphic filtering on an image. This form of
filtering sharpens features and flattens lighting variantions in an
image. It usually is very effective on images which have large
variations in lighting, for example when a subject appears against
strong backlighting.
```
Usage: newim =
homomorphic(inimage,boost,CutOff,order,lhistogram_cut,uhistogram_cut, hndl)
homomorphic(inimage,boost,CutOff,order,lhistogram_cut,uhistogram_cut)
homomorphic(inimage,boost,CutOff,order,hndl)
homomorphic(inimage,boost,CutOff,order)
Parameters: (suggested values are in brackets)
boost - The ratio that high frequency values are boosted
relative to the low frequency values (2).
CutOff - Cutoff frequency of the filter (0 - 0.5)
order - Order of the modified Butterworth style filter that
is used, this must be an integer > 1 (2)
lhistogram_cut - Percentage of the lower end of the filtered image's
histogram to be truncated, this eliminates extreme
values in the image from distorting the final result. (0)
uhistogram_cut - Percentage of upper end of histogram to truncate. (5)
hndl - Optional handle to text box for updating
messages to be sent to a GUI interface.
```
If lhistogram_cut and uhistogram_cut are not specified no histogram
truncation will be applied.
Suggested values: newim = homomorphic(im, 2, .25, 2, 0, 5)
"""
# June 1999
# December 2001 cleaned up and modified to work with colour images
function him = homomorphic(im, boost, CutOff, order, varargin)
if ndims(im) == 2 # Greyscale image
him = Ihomomorphic(im, boost, CutOff, order, varargin)
else # Assume colour image in RGB format
hsv = rgb2hsv(im) # Convert to HSV and apply homomorphic
# filtering to just the intensity component.
hsv(:,:,3) = Ihomomorphic(hsv(:,:,3), boost, CutOff, order, varargin)
him = hsv2rgb(hsv) # Convert back to RGB
end
#------------------------------------------------------------------------
# Internal function that does the real work
#------------------------------------------------------------------------
function him = Ihomomorphic(im, boost, CutOff, order, varargin)
# The possible elements in varargin are:
# {lhistogram_cut, uhistogram_cut, hndl}
varargin = varargin{:}
if nargin == 5
nopparams = length(varargin)
end
if (nopparams == 3)
dispStatus = 1
truncate = 1
lhistogram_cut = varargin{1}
uhistogram_cut = varargin{2}
hndl = varargin{3}
elseif (nopparams == 2)
dispStatus = 0
truncate = 1
lhistogram_cut = varargin{1}
uhistogram_cut = varargin{2}
elseif (nopparams == 1)
dispStatus = 1
truncate = 0
hndl = varargin{1}
elseif (nopparams == 0)
dispStatus = 0
truncate = 0
else
disp('Usage: newim = homomorphic(inimage,LowGain,HighGain,CutOff,order,lhistogram_cut,uhistogram_cut)')
error('or newim = homomorphic(inimage,LowGain,HighGain,CutOff,order)')
end
[rows,cols] = size(im)
im = normalise(im) # Rescale values 0-1 (and cast
# to `double' if needed).
FFTlogIm = fft2(log(im+.01)) # Take FFT of log (with offset
# to avoid log of 0).
h = highboostfilter([rows cols], CutOff, order, boost)
him = exp(real(ifft2(FFTlogIm.*h))) # Apply the filter, invert
# fft, and invert the log.
if truncate
# Problem:
# The extreme bright values in the image are exaggerated by the filtering.
# These (now very) bright values have the overall effect of darkening the
# whole image when we rescale values to 0-255.
#
# Solution:
# Construct a histogram of the image. Find the level below which a high
# percentage of the image lies (say 95#). Saturate the grey levels in
# the image to this level.
if dispStatus
set(hndl,'String','Calculating histogram and truncating...')
drawnow
else
disp('Calculating histogram and truncating...')
end
him = histtruncate(him, lhistogram_cut, uhistogram_cut)
else
him = normalise(him) # No truncation, but fix range 0-1
end
end
=#
#--------------------------------------------------------------------
# perfft2
"""
2D Fourier transform of Moisan's periodic image component.
```
Usage: (P, S, p, s) = perfft2(img)
Argument: img - Image to be transformed
Returns: P - 2D fft of periodic image component
S - 2D fft of smooth component
p - Periodic component (spatial domain)
s - Smooth component (spatial domain)
```
Moisan's "Periodic plus Smooth Image Decomposition" decomposes an image
into two components
img = p + s
where s is the 'smooth' component with mean 0 and p is the 'periodic'
component which has no sharp discontinuities when one moves cyclically
across the image boundaries.
This decomposition is very useful when one wants to obtain an FFT of
an image with minimal artifacts introduced from the boundary
discontinuities. The image p gathers most of the image information
but avoids periodization artifacts.
The typical use of this function is to obtain a 'periodic only' fft of an
image
    (P,) = perfft2(img)
Displaying the amplitude spectrum of P will yield a clean spectrum without the
typical vertical-horizontal 'cross' arising from the image boundaries that you
would normally see.
Note if you are using the function to perform filtering in the frequency
domain you may want to retain s (the smooth component in the spatial domain)
and add it back to the filtered result at the end.
The computational cost of obtaining the 'periodic only' FFT involves taking an
additional FFT.
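For example, a sketch of filtering in the frequency domain where the smooth
component is retained and added back at the end (`H` here stands for some
frequency domain filter of your choosing):
```
(P, S, p, s) = perfft2(img)
filtered = real.(ifft(P.*H)) .+ s
```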
"""
function perfft2(img::Array{T,2}) where T <: Real
#=
Reference:
This code is adapted from Lionel Moisan's Scilab function 'perdecomp.sci'
"Periodic plus Smooth Image Decomposition" 07/2012 available at
http://www.mi.parisdescartes.fr/~moisan/p+s
Paper:
L. Moisan, "Periodic plus Smooth Image Decomposition", Journal of
Mathematical Imaging and Vision, vol 39:2, pp. 161-179, 2011.
=#
(rows,cols) = size(img)
# Compute the boundary image which is equal to the image discontinuity
# values across the boundaries at the edges and is 0 elsewhere
s = zeros(rows, cols)
s[1,:] = img[1,:] - img[end,:]
s[end,:] = -s[1,:]
s[:,1] = s[:,1] + img[:,1] - img[:,end]
s[:,end] = s[:,end] - img[:,1] + img[:,end]
# Generate grid upon which to compute the filter for the boundary image in
# the frequency domain. Note that cos() is cyclic hence the grid values can
# range from 0 .. 2*pi rather than 0 .. pi and then pi .. 0
# Generate FFT of smooth component
cxrange = 2*pi*(0:cols-1)/cols
cyrange = 2*pi*(0:rows-1)/rows
denom = [2*(2 - cos(cx) - cos(cy)) for cy in cyrange, cx in cxrange]
S = fft(s)./denom
# The [1,1] element of the filter will be 0 so S[1,1] may be Inf or NaN
S[1,1] = 0.0 # Enforce 0 mean
P = fft(img) .- S # FFT of periodic component
# ** ? Perhaps have a separate version or a flag to request the
# ** spatial versions of p and s
s = real(ifft(S))
p = img .- s
return P, S, p, s
end
#----------------------------------------------------------------------
# geoseries
"""
Generate geometric series.
Useful for generating geometrically scaled wavelengths for specifying
filter banks.
```
Usage 1: s = geoseries(s1, mult, n)
Arguments: s1 - The starting value in the series.
mult - The scaling factor between succesive values.
n - The desired number of elements in the series.
Usage 2: s = geoseries((s1, sn), n)
Arguments: (s1, sn) - Tuple specifying the 1st and last values
in the the series.
n - The desired number of elements in the series.
```
Example:
```
s = geoseries(0.5, 2, 4)
s = [0.5000, 1.0000, 2.0000, 4.0000]
```
Alternatively obtain the same series using
```
s = geoseries((0.5, 4), 4)
```
"""
function geoseries(s1::Real, mult::Real, n::Integer)
@assert n > 0 "Number of elements must be a +ve integer"
@assert s1 > 0 "Starting value must be > 0"
    return s1 * mult.^(0:(n-1))
end
function geoseries(s1sn::Tuple{Real, Real}, n::Int)
# Compute the multiplier from the desired number of elements.
# max_val = s1*mult^(n-1)
s1 = s1sn[1]
sn = s1sn[2]
@assert s1 > 0 "Starting value must be > 0"
mult = exp(log(sn/s1)/(n-1))
return geoseries(s1, mult, n)
end
#----------------------------------------------------------------------
| ImagePhaseCongruency | https://github.com/peterkovesi/ImagePhaseCongruency.jl.git |
|
[
"MIT"
] | 0.2.2 | 6a18107b6fc89bb32eb5dcec609a7355b53e8b78 | code | 87039 | #=--------------------------------------------------------------------
phasecongruency - Functions related to the phase congruency model of
feature perception and phase based approaches to
image processing.
Copyright (c) 2015-2018 Peter Kovesi
peterkovesi.com
MIT License:
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
August 2015 Original conversion from MATLAB to Julia
November 2017 Julia 0.6
October 2018 Julia 0.7/1.0
---------------------------------------------------------------------=#
using FFTW, Statistics
using ImageCore
export phasecongmono, phasesymmono, ppdrc
export highpassmonogenic, bandpassmonogenic
export gaborconvolve, monofilt
export phasecong3, phasesym, ppdenoise
#--------------------------------------------------------------------
# ppdrc
"""
Phase Preserving Dynamic Range Compression
Generates a series of dynamic range compressed images at different scales.
This function is designed to reveal subtle features within high dynamic range
images such as aeromagnetic and other potential field grids. Often this kind
of data is presented using histogram equalisation in conjunction with a
rainbow colourmap. A problem with histogram equalisation is that the contrast
amplification of a feature depends on how commonly its data value occurs,
rather than on the amplitude of the feature itself.
Phase Preserving Dynamic Range Compression allows subtle features to be
revealed without these distortions. Perceptually important phase information
is preserved and the contrast amplification of anomalies in the signal is
purely a function of their amplitude. It operates as follows: first a highpass
filter is applied to the data; this controls the desired scale of analysis.
The 2D analytic signal of the data is then computed to obtain local phase and
amplitude at each point in the image. The amplitude is attenuated by adding 1
and then taking its logarithm; the signal is then reconstructed using the
original phase values.
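In effect each output value is `log(1 + E)*sin(phase)`, where `E` and `phase`
are the local amplitude and phase of the highpass-filtered signal.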
```
Usage: dimg = ppdrc(img, wavelength; clip, n)
Arguments: img - Image to be processed. A 2D array of Real or Gray elements.
wavelength - Scalar value, or Vector, of wavelengths, in pixels, of
the cut-in frequencies to be used when forming the highpass
versions of the image. Try a range of values starting
with, say, a wavelength corresponding to half the size
of the image and working down to something like 50
grid units.
Keyword arguments:
clip - Percentage of output image histogram to clip. Only a
very small value should be used, say 0.01 or 0.02, but
this can be beneficial. Defaults to 0.01%
n - Order of the Butterworth high pass filter. Defaults
to 2
Returns: dimg - Array of the dynamic range reduced images. If only
one wavelength is specified the image is returned
directly, and not as a one element array of image arrays.
```
Important: Scaling of the image affects the results. If your image has values
of order 1 or less it is useful to scale the image up a few orders of magnitude.
The reason is that when the frequency amplitudes are attenuated we add one
before taking the log to avoid obtaining negative results for values less than
one. Thus if `v` is small `log(1 + v)` will not be a good approximation to `log(v)`.
However, if you scale the image by say, 1000 then `log(1 + 1000*v)` will be a reasonable
approximation to `log(1000*v)`.
When specifying the array `wavelength` it is suggested that you use wavelengths
that increase in a geometric series. You can use the function `geoseries()` to
conveniently do this
Example using `geoseries()` to generate a set of wavelengths that increase
geometrically in 10 steps from 50 to 800.
```
dimg = ppdrc(img, geoseries((50, 800), 10))
```
See also: [`highpassmonogenic`](@ref), [`geoseries`](@ref)
"""
function ppdrc(img::AbstractArray{T1,2}, wavelength::Vector{T2}; clip::Real=0.01, n::Integer=2) where {T1 <: Real, T2 <: Real}
#=
Reference:
Peter Kovesi, "Phase Preserving Tone Mapping of Non-Photographic High Dynamic
Range Images". Proceedings: Digital Image Computing: Techniques and
Applications 2012 (DICTA 2012). Available via IEEE Xplore
Preprint: http://www.peterkovesi.com/papers/DICTA2012-tonemapping.pdf
=#
nscale = length(wavelength)
(ph, _, E) = highpassmonogenic(img, wavelength, n)
# Construct each dynamic range reduced image
dimg = Vector{Array{Float64,2}}(undef, nscale)
if nscale == 1 # Single image, highpassmonogenic() will have returned single
# images, hence this separate case
dimg[1] = histtruncate(sin.(ph).*log1p.(E), clip, clip)
else # ph and E will be arrays of 2D arrays
range = zeros(nscale,1)
for k = 1:nscale
dimg[k] = histtruncate(sin.(ph[k]).*log1p.(E[k]), clip, clip)
range[k] = maximum(abs.(dimg[k]))
end
maxrange = maximum(range)
# Set the first two pixels of each image to +range and -range so that
# when the sequence of images are displayed together, say using linimix(),
# there are no unexpected overall brightness changes
for k = 1:nscale
dimg[k][1] = maxrange
dimg[k][2] = -maxrange
end
end
if nscale == 1 # Single image, return output matrix directly
return dimg[1]
else
return dimg
end
end
# Case when wavelength is a single value
function ppdrc(img::AbstractArray{T1,2}, wavelength::Real; clip::Real=0.01, n::Integer=2) where T1 <: Real
return ppdrc(img, [wavelength]; clip=clip, n=n)
end
# Case for an image of Gray values
function ppdrc(img::AbstractArray{T1,2}, wavelength::Real; clip::Real=0.01, n::Integer=2) where T1 <: Gray
fimg = Float64.(img)
return ppdrc(fimg, wavelength; clip=clip, n=n)
end
#--------------------------------------------------------------------
# highpassmonogenic
"""
Compute phase and amplitude in highpass images via monogenic filters.
```
Usage: (phase, orient, E) = highpassmonogenic(img, maxwavelength, n)
Arguments: img - Image to be processed. A 2D array of Real or Gray elements.
maxwavelength - Wavelength(s) in pixels of the cut-in frequency(ies)
of the Butterworth highpass filter.
n - The order of the Butterworth filter. This is an
integer >= 1. The higher the value the sharper
the cutoff.
Returns: phase - The local phase. Values are between -pi/2 and pi/2
orient - The local orientation. Values between -pi and pi.
Note that where the local phase is close to
+-pi/2 the orientation will be poorly defined.
E - Local energy, or amplitude, of the signal.
```
Note that `maxwavelength` can be a vector, in which case each output will be
a vector of images of length `nscales`, where `nscales = length(maxwavelength)`.
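For example, a sketch of a multi-scale call (the wavelengths are arbitrary):
```
(phase, orient, E) = highpassmonogenic(img, [50.0, 100.0, 200.0], 2)
# phase[s], orient[s] and E[s] correspond to maxwavelength[s]
```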
See also: [`bandpassmonogenic`](@ref), [`ppdrc`](@ref), [`monofilt`](@ref)
"""
function highpassmonogenic(img::AbstractArray{T1,2}, maxwavelength::Vector{T2}, n::Integer) where {T1 <: Real, T2 <: Real}
if minimum(maxwavelength) < 2
error("Minimum wavelength that can be specified is 2 pixels")
end
nscales = length(maxwavelength)
IMG = fft(img)
# Generate monogenic and filter grids
(H1, H2, freq) = monogenicfilters(size(img))
phase = Vector{Array{Float64,2}}(undef, nscales)
orient = Vector{Array{Float64,2}}(undef, nscales)
E = Vector{Array{Float64,2}}(undef, nscales)
f = zeros(size(img))
h1f = zeros(size(img))
h2f = zeros(size(img))
H = zeros(size(img))
for s = 1:nscales
# High pass Butterworth filter
H .= 1.0 .- 1.0 ./ (1.0 .+ (freq .* maxwavelength[s]).^(2*n))
f .= real.(ifft(H.*IMG))
h1f .= real.(ifft(H.*H1.*IMG))
h2f .= real.(ifft(H.*H2.*IMG))
phase[s] = atan.(f./sqrt.(h1f.^2 .+ h2f.^2 .+ eps()))
orient[s] = atan.(h2f, h1f)
E[s] = sqrt.(f.^2 .+ h1f.^2 .+ h2f.^2)
end
# If a single scale specified return output matrices directly
if nscales == 1
return phase[1], orient[1], E[1]
else
return phase, orient, E
end
end
# Version when maxwavelength is a scalar
function highpassmonogenic(img::AbstractArray{T,2}, maxwavelength::Real, n::Integer) where T <: Real
return highpassmonogenic(img, [maxwavelength], n)
end
# Case for an image of Gray values
function highpassmonogenic(img::AbstractArray{T,2}, maxwavelength, n::Integer) where T <: Gray
fimg = Float64.(img)
return highpassmonogenic(fimg, maxwavelength, n)
end
#--------------------------------------------------------------------
# bandpassmonogenic
"""
Compute phase and amplitude in bandpass images via monogenic filters.
```
Usage: (phase, orient, E) = bandpassmonogenic(img, minwavelength, maxwavelength, n)
Arguments: img - Image to be processed. A 2D array of Real or Gray elements.
minwavelength - } Wavelength(s) in pixels of the cut-in and cut-out frequency(ies)
maxwavelength - } of the Butterworth bandpass filter(s).
n - The order of the Butterworth filter. This is an
integer >= 1. The higher the value the sharper
the cutoff.
Returns: phase - The local phase. Values are between -pi/2 and pi/2
orient - The local orientation. Values between -pi and pi.
Note that where the local phase is close to
+-pi/2 the orientation will be poorly defined.
E - Local energy, or amplitude, of the signal.
```
Note that `minwavelength` and `maxwavelength` can be equal-length vectors, in which
case each output will be a vector of images of length `nscales`, where
`nscales = length(maxwavelength)`.
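For example, a sketch with two arbitrarily chosen passbands:
```
(phase, orient, E) = bandpassmonogenic(img, [20.0, 40.0], [40.0, 80.0], 2)
# phase[1] corresponds to the 20-40 pixel passband, phase[2] to 40-80
```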
See also: [`highpassmonogenic`](@ref), [`ppdrc`](@ref), [`monofilt`](@ref)
"""
function bandpassmonogenic(img::AbstractArray{T1,2}, minwavelength::Vector{T2}, maxwavelength::Vector{T3}, n::Integer) where {T1 <: Real, T2 <: Real, T3 <: Real}
if minimum(minwavelength) < 2 || minimum(maxwavelength) < 2
error("Minimum wavelength that can be specified is 2 pixels")
end
if length(minwavelength) != length(maxwavelength)
error("Arrays of min and max wavelengths must be of same length")
end
nscales = length(maxwavelength)
IMG = fft(img)
# Generate monogenic and filter grids
(H1, H2, freq) = monogenicfilters(size(img))
phase = Vector{Array{Float64,2}}(undef, nscales)
orient = Vector{Array{Float64,2}}(undef, nscales)
E = Vector{Array{Float64,2}}(undef, nscales)
f = zeros(size(img))
h1f = zeros(size(img))
h2f = zeros(size(img))
H = zeros(size(img))
for s = 1:nscales
# Band pass Butterworth filter
H .= 1.0 ./ (1.0 .+ (freq .* minwavelength[s]).^(2*n)) .-
1.0 ./ (1.0 .+ (freq .* maxwavelength[s]).^(2*n))
f .= real.(ifft(H.*IMG))
h1f .= real.(ifft(H.*H1.*IMG))
h2f .= real.(ifft(H.*H2.*IMG))
phase[s] = atan.(f./sqrt.(h1f.^2 .+ h2f.^2 .+ eps()))
orient[s] = atan.(h2f, h1f)
E[s] = sqrt.(f.^2 .+ h1f.^2 .+ h2f.^2)
end
# If a single scale specified return output matrices directly
if nscales == 1
return phase[1], orient[1], E[1]
else
return phase, orient, E
end
end
# Version when minwavelength and maxwavelength are scalars
function bandpassmonogenic(img::AbstractArray{T,2}, minwavelength::Real, maxwavelength::Real, n::Integer) where T <: Real
return bandpassmonogenic(img, [minwavelength], [maxwavelength], n)
end
# Case for an image of Gray values
function bandpassmonogenic(img::AbstractArray{T,2}, minwavelength, maxwavelength, n::Integer) where T <: Gray
fimg = Float64.(img)
return bandpassmonogenic(fimg, minwavelength, maxwavelength, n)
end
#--------------------------------------------------------------------
# phasecongmono
"""
Phase congruency of an image using monogenic filters.
This code is considerably faster than `phasecong3()` but you may prefer the
output from `phasecong3()`'s oriented filters.
There are potentially many arguments, here is the full usage:
```
(PC, or, ft, T) =
phasecongmono(img; nscale, minwavelength, mult,
sigmaonf, k, cutoff, g, deviationgain, noisemethod)
However, apart from the image, all parameters have defaults and the
usage can be as simple as:
(PC,) = phasecongmono(img) # Use (PC,) so that PC is not a tuple of all
# the returned values
More typically you will pass the image followed by a series of keyword
arguments that you wish to set, leaving the remaining parameters set to
their defaults, for example:
(PC,) = phasecongmono(img, nscale = 5, minwavelength = 3, k = 2.5)
Keyword arguments:
Default values Description
nscale 4 - Number of wavelet scales, try values 3-6
A lower value will reveal more fine scale
features. A larger value will highlight 'major'
features.
minwavelength 3 - Wavelength of smallest scale filter.
mult 2.1 - Scaling factor between successive filters.
sigmaonf 0.55 - Ratio of the standard deviation of the Gaussian
describing the log Gabor filter's transfer function
in the frequency domain to the filter center frequency.
k 3.0 - No of standard deviations of the noise energy beyond
the mean at which we set the noise threshold point.
You may want to vary this up to a value of 10 or
20 for noisy images
cutoff 0.5 - The fractional measure of frequency spread
below which phase congruency values get penalized.
g 10 - Controls the sharpness of the transition in
the sigmoid function used to weight phase
congruency for frequency spread.
deviationgain 1.5 - Amplification to apply to the calculated phase
deviation result. Increasing this sharpens the
edge responses, but can also attenuate their
magnitude if the gain is too large. Sensible
values to use lie in the range 1-2.
noisemethod -1 - Parameter specifies method used to determine
noise statistics.
-1 use median of smallest scale filter responses
-2 use mode of smallest scale filter responses
0+ use noiseMethod value as the fixed noise threshold
A value of 0 will turn off all noise compensation.
Returned values:
PC - Phase congruency indicating edge significance
or - Orientation image in radians -pi/2 to pi/2, +ve anticlockwise.
0 corresponds to a vertical edge, pi/2 is horizontal.
ft - Local weighted mean phase angle at every point in the
image. A value of pi/2 corresponds to a bright line, 0
corresponds to a step and -pi/2 is a dark line.
T - Calculated noise threshold (can be useful for
diagnosing noise characteristics of images). Once you know
this you can then specify fixed thresholds and save some
computation time.
```
The convolutions are done via the FFT. Many of the parameters relate to the
specification of the filters in the frequency plane. The values do not seem
to be very critical and the defaults are usually fine. You may want to
experiment with the values of `nscale` and `k`, the noise compensation
factor.
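For instance, one plausible way to reuse the returned threshold on a second
image with similar noise characteristics (a sketch; `img1` and `img2` are
illustrative names):
```
(PC1, or1, ft1, T) = phasecongmono(img1)        # T estimated from img1
(PC2,) = phasecongmono(img2, noisemethod = T)   # reuse T as a fixed threshold
```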
Typical sequence of operations to obtain an edge image:
```
> (PC, or) = phasecongmono(img)
> nm = nonmaxsup(PC, or, 1.5) # nonmaxima suppression
> bw = hysthresh(nm, 0.1, 0.3) # hysteresis thresholding 0.1 - 0.3
Notes on filter settings to obtain even coverage of the spectrum
sigmaonf .85 mult 1.3
sigmaonf .75 mult 1.6 (filter bandwidth ~1 octave)
sigmaonf .65 mult 2.1
sigmaonf .55 mult 3 (filter bandwidth ~2 octaves)
```
Note that better results are generally achieved using the large
bandwidth filters. I typically use a `sigmaOnf` value of 0.55 or even
smaller.
See also: [`phasecong3`](@ref), [`phasesymmono`](@ref), [`gaborconvolve`](@ref), [`filtergrid`](@ref)
"""
function phasecongmono(img::AbstractArray{T1,2}; nscale::Integer = 4, minwavelength::Real = 3,
mult::Real = 2.1, sigmaonf::Real = 0.55, k::Real = 3.0,
noisemethod::Real = -1, cutoff::Real = 0.5, g::Real = 10.0,
deviationgain::Real = 1.5) where T1 <: Real
#=
References:
Peter Kovesi, "Image Features From Phase Congruency". Videre: A
Journal of Computer Vision Research. MIT Press. Volume 1, Number 3,
Summer 1999 http://www.cs.rochester.edu/u/brown/Videre/001/v13.html
Michael Felsberg and Gerald Sommer, "A New Extension of Linear Signal
Processing for Estimating Local Properties and Detecting Features". DAGM
Symposium 2000, Kiel
Michael Felsberg and Gerald Sommer. "The Monogenic Signal" IEEE
Transactions on Signal Processing, 49(12):3136-3144, December 2001
Peter Kovesi, "Phase Congruency Detects Corners and Edges". Proceedings
DICTA 2003, Sydney Dec 10-12. Available via IEEE Xplore
Preprint: http://www.peterkovesi.com/papers/phasecorners.pdf
=#
epsilon = 0.0001 # Used to prevent division by zero.
(rows,cols) = size(img)
# (IMG,) = perfft2(img) # Periodic Fourier transform of image
# (Just get the first returned value)
IMG = fft(img) # Use fft rather than perfft2
sumAn = zeros(rows,cols) # Accumulators
sumf = zeros(rows,cols)
sumh1 = zeros(rows,cols)
sumh2 = zeros(rows,cols)
maxAn = zeros(rows,cols) # Need maxAn in main scope of function
IMGF = zeros(ComplexF64, rows, cols) # Buffers
h = zeros(ComplexF64, rows, cols)
f = zeros(rows, cols)
h1 = zeros(rows, cols)
h2 = zeros(rows, cols)
An = zeros(rows, cols)
or = zeros(rows,cols) # Final output arrays
ft = zeros(rows,cols)
energy = zeros(rows,cols)
PC = zeros(rows,cols)
tau = 0.0
T = 0.0
# Generate filter grids in the frequency domain
(H, freq) = packedmonogenicfilters(rows,cols)
# The two monogenic filters H1 and H2 that are packed within H are
# not selective in terms of the magnitudes of the frequencies.
# The code below generates bandpass log-Gabor filters which are
# point-wise multiplied by IMG to produce different bandpass
# versions of the image before being convolved with H1 and H2. We
# also apply a low-pass filter that is as large as possible, yet
# falls away to zero at the boundaries. All filters are
# multiplied by this to ensure no extra frequencies at the
# 'corners' of the FFT are incorporated as this can upset the
# normalisation process when calculating phase congruency. The
# low-pass filter has a cutoff frequency of 0.45 and a high order of 15.
for s = 1:nscale
wavelength = minwavelength*mult^(s-1)
fo = 1.0/wavelength # Centre frequency of filter.
# For each element in IMG construct and apply the log Gabor filter and low-pass filter
# to produce IMGF, the bandpassed image in the frequency domain.
for n in eachindex(freq)
IMGF[n] = IMG[n]*loggabor(freq[n], fo, sigmaonf)*lowpassfilter(freq[n], 0.45, 15)
end
f .= real.(ifft(IMGF)) # Bandpassed image in spatial domain.
h .= IMGF.*H # Apply monogenic filter.
ifft!(h) # real part of h contains convolution result with h1,
# imaginary part contains convolution result with h2.
# h .= ifft(IMGF.*H) # (not as fast or memory efficient)
@. h1 = real(h)
@. h2 = imag(h)
@. An = sqrt(f^2 + h1^2 + h2^2) # Amplitude of this scale component.
@. sumAn += An # Sum of component amplitudes over scale.
@. sumf += f
@. sumh1 += h1
@. sumh2 += h2
# At the smallest scale estimate noise characteristics from the
# distribution of the filter amplitude responses stored in sumAn.
# tau is the Rayleigh parameter that is used to describe the
# distribution.
if s == 1
if abs(noisemethod + 1) < epsilon # Use median to estimate noise statistics
tau = median(sumAn)/sqrt(log(4))
elseif abs(noisemethod + 2) < epsilon # Use mode to estimate noise statistics
tau = rayleighmode(sumAn)
end
maxAn .= An
else
# Record maximum amplitude of components across scales. This is needed
# to determine the frequency spread weighting.
maxAn .= max.(maxAn, An)
end
end # For each scale
# Form weighting that penalizes frequency distributions that are
# particularly narrow. Calculate fractional 'width' of the frequencies
# present by taking the sum of the filter response amplitudes and dividing
# by the maximum component amplitude at each point on the image. If
# there is only one non-zero component width takes on a value of 0, if
# all components are equal width is 1.
width = (sumAn./(maxAn .+ epsilon) .- 1) ./ (nscale-1)
# Now calculate the sigmoidal weighting function.
weight = 1.0 ./ (1 .+ exp.((cutoff .- width).*g))
# Automatically determine noise threshold
#
# Assuming the noise is Gaussian the response of the filters to noise will
# form Rayleigh distribution. We use the filter responses at the smallest
# scale as a guide to the underlying noise level because the smallest scale
# filters spend most of their time responding to noise, and only
# occasionally responding to features. Either the median, or the mode, of
# the distribution of filter responses can be used as a robust statistic to
# estimate the distribution mean and standard deviation as these are related
# to the median or mode by fixed constants. The response of the larger
# scale filters to noise can then be estimated from the smallest scale
# filter response according to their relative bandwidths.
#
# This code assumes that the expected response to noise on the phase
# congruency calculation is simply the sum of the expected noise responses
# of each of the filters. This is a simplistic overestimate, however these
# two quantities should be related by some constant that will depend on the
# filter bank being used. Appropriate tuning of the parameter 'k' will
# allow you to produce the desired output. (though the value of k seems to
# be not at all critical)
if noisemethod >= 0 # We are using a fixed noise threshold
T = noisemethod # use supplied noiseMethod value as the threshold
else
# Estimate the effect of noise on the sum of the filter responses as
# the sum of estimated individual responses (this is a simplistic
# overestimate). As the estimated noise response at successive scales
# is scaled inversely proportional to bandwidth we have a simple
# geometric sum.
totalTau = tau * (1 - (1/mult)^nscale)/(1-(1/mult))
# Calculate mean and std dev from tau using fixed relationship
# between these parameters and tau. See
# http://mathworld.wolfram.com/RayleighDistribution.html
EstNoiseEnergyMean = totalTau*sqrt(pi/2) # Expected mean and std
EstNoiseEnergySigma = totalTau*sqrt((4-pi)/2) # values of noise energy
T = EstNoiseEnergyMean + k*EstNoiseEnergySigma # Noise threshold
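# Illustrative numbers (not from the source): with tau = 1.0, mult = 2.1,
# nscale = 4 and k = 3 the geometric sum gives totalTau ≈ 1.81, hence
# EstNoiseEnergyMean ≈ 2.27, EstNoiseEnergySigma ≈ 1.19 and T ≈ 5.83.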
end
#------ Final computation of key quantities -------
# Orientation - this varies +/- pi/2
@. or = atan(-sumh2/sumh1)
# Feature type - a phase angle -pi/2 to pi/2.
@. ft = atan(sumf, sqrt(sumh1^2 + sumh2^2))
# Overall energy
@. energy = sqrt(sumf^2 + sumh1^2 + sumh2^2)
# Compute phase congruency. The original measure,
# PC = energy/sumAn
# is proportional to the weighted cos(phasedeviation). This is not very
# localised
# A more localised measure to use is
# PC = 1 - phasedeviation.
# The expression below uses the fact that the weighted cosine of
# the phase deviation is given by energy/sumAn. Note, in the
# expression below that the noise threshold is not subtracted from
# energy immediately as this would interfere with the phase
# deviation computation. Instead it is applied as a weighting as
# a fraction by which energy exceeds the noise threshold. This
# weighting is applied in addition to the weighting for frequency
# spread. Note also the phase deviation gain factor which acts to
# sharpen up the edge response. A value of 1.5 seems to work well.
# Sensible values are from 1 to about 2.
@. PC = weight*max(1 - deviationgain*acos(energy/(sumAn + epsilon)),0) *
max(energy-T,0)/(energy+epsilon)
return PC, or, ft, T
end
# Case for an image of Gray values
function phasecongmono(img::AbstractArray{T1,2}; nscale::Integer = 4, minwavelength::Real = 3,
mult::Real = 2.1, sigmaonf::Real = 0.55, k::Real = 3.0,
noisemethod::Real = -1, cutoff::Real = 0.5, g::Real = 10.0,
deviationgain::Real = 1.5) where T1 <: Gray
fimg = Float64.(img)
return phasecongmono(fimg, nscale=nscale, minwavelength=minwavelength, mult=mult, sigmaonf=sigmaonf,
k=k, noisemethod=noisemethod, cutoff=cutoff, g=g, deviationgain=deviationgain)
end
#-------------------------------------------------------------------------
"""
rayleighmode
Computes mode of a vector/matrix of data that is assumed to come from a
Rayleigh distribution.
```
Usage: rmode = rayleighmode(data, nbins)
Arguments: data - data assumed to come from a Rayleigh distribution
nbins - Optional number of bins to use when forming histogram
of the data to determine the mode.
```
Mode is computed by forming a histogram of the data over 50 bins and then
finding the maximum value in the histogram. Mean and standard deviation
can then be calculated from the mode as they are related by fixed
constants.
```
mean = mode * sqrt(pi/2)
std dev = mode * sqrt((4-pi)/2)
See
http://mathworld.wolfram.com/RayleighDistribution.html
http://en.wikipedia.org/wiki/Rayleigh_distribution
```
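A quick sanity check, assuming the data really are Rayleigh distributed (a
sketch; the choice sigma = 2 is arbitrary):
```
data = 2.0 .* sqrt.(-2 .* log.(1 .- rand(100_000)))  # Rayleigh samples, sigma = 2
rayleighmode(data)  # ≈ 2, since the mode of a Rayleigh distribution equals sigma
```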
"""
function rayleighmode(X, nbins::Integer = 50)
edges, counts = build_histogram(X, nbins)
ind = argmax(counts)
return (edges[ind]+edges[ind+1])/2
end
#-------------------------------------------------------------------------
# phasesymmono
"""
Phase symmetry of an image using monogenic filters.
This function calculates the phase symmetry of points in an image.
This is a contrast invariant measure of symmetry. This function can be
used as a line and blob detector. The greyscale polarity of the lines
that you want to find can be specified.
This code is considerably faster than `phasesym()` but you may prefer the
output from `phasesym()`'s oriented filters.
There are potentially many arguments, here is the full usage:
```
(phSym, symmetryEnergy, T) =
phasesymmono(img; nscale, minwaveLength, mult,
sigmaonf, k, polarity, noisemethod)
```
However, apart from the image, all parameters have defaults and the
usage can be as simple as:
```
(phSym,) = phasesymmono(img)
Keyword arguments:
Default values Description
nscale 5 - Number of wavelet scales, try values 3-6
minwaveLength 3 - Wavelength of smallest scale filter.
mult 2.1 - Scaling factor between successive filters.
sigmaonf 0.55 - Ratio of the standard deviation of the Gaussian
describing the log Gabor filter's transfer function
in the frequency domain to the filter center frequency.
k 2.0 - No of standard deviations of the noise energy beyond
the mean at which we set the noise threshold point.
You may want to vary this up to a value of 10 or
20 for noisy images
polarity 0 - Controls 'polarity' of symmetry features to find.
1 - just return 'bright' points
-1 - just return 'dark' points
0 - return bright and dark points.
noisemethod -1 - Parameter specifies method used to determine
noise statistics.
-1 use median of smallest scale filter responses
-2 use mode of smallest scale filter responses
0+ use noiseMethod value as the fixed noise threshold
A value of 0 will turn off all noise compensation.
Return values:
phSym - Phase symmetry image (values between 0 and 1).
symmetryEnergy - Un-normalised raw symmetry energy which may be
more to your liking.
T - Calculated noise threshold (can be useful for
diagnosing noise characteristics of images)
```
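For example, the `polarity` option can be used to separate bright and dark
blobs (a sketch):
```
(bright,) = phasesymmono(img; polarity = 1)    # bright features only
(dark,)   = phasesymmono(img; polarity = -1)   # dark features only
```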
The convolutions are done via the FFT. Many of the parameters relate to the
specification of the filters in the frequency plane. The values do not seem
to be very critical and the defaults are usually fine. You may want to
experiment with the values of `nscale` and `k`, the noise compensation factor.
Notes on filter settings to obtain even coverage of the spectrum
```
sigmaonf .85 mult 1.3
sigmaonf .75 mult 1.6 (filter bandwidth ~1 octave)
sigmaonf .65 mult 2.1
sigmaonf .55 mult 3 (filter bandwidth ~2 octaves)
```
See Also: [`phasesym`](@ref), [`phasecongmono`](@ref)
"""
function phasesymmono(img::AbstractArray{T1,2}; nscale::Integer = 5, minwavelength::Real = 3,
mult::Real = 2.1, sigmaonf::Real = 0.55, k::Real = 2.0,
polarity::Integer = 0, noisemethod::Real = -1) where T1 <: Real
#=
References:
Peter Kovesi, "Symmetry and Asymmetry From Local Phase" AI'97, Tenth
Australian Joint Conference on Artificial Intelligence. 2 - 4 December
1997. http://www.peterkovesi.com/papers/ai97.pdf
Peter Kovesi, "Image Features From Phase Congruency". Videre: A
Journal of Computer Vision Research. MIT Press. Volume 1, Number 3,
Summer 1999 http://www.cs.rochester.edu/u/brown/Videre/001/v13.html
Michael Felsberg and Gerald Sommer, "A New Extension of Linear Signal
Processing for Estimating Local Properties and Detecting Features". DAGM
Symposium 2000, Kiel
Michael Felsberg and Gerald Sommer. "The Monogenic Signal" IEEE
Transactions on Signal Processing, 49(12):3136-3144, December 2001
=#
epsilon = 0.0001 # Used to prevent division by zero.
(rows,cols) = size(img)
IMG = fft(img) # Fourier transform of image
tau = 0.0
symmetryEnergy = zeros(rows,cols) # Matrix for accumulating weighted phase
# symmetry values (energy).
sumAn = zeros(rows,cols) # Matrix for accumulating filter response
# amplitude values.
IMGF = zeros(ComplexF64, rows, cols)
h = zeros(ComplexF64, rows, cols)
f = zeros(rows, cols)
# Generate filter grids in the frequency domain
(H, freq) = packedmonogenicfilters(rows,cols)
# The two monogenic filters H1 and H2 that are packed within H are
# not selective in terms of the magnitudes of the frequencies.
# The code below generates bandpass log-Gabor filters which are
# point-wise multiplied by IMG to produce different bandpass
# versions of the image before being convolved with H1 and H2. We
# also apply a low-pass filter that is as large as possible, yet
# falls away to zero at the boundaries. All filters are
# multiplied by this to ensure no extra frequencies at the
# 'corners' of the FFT are incorporated as this can upset the
# normalisation process when calculating phase symmetry
for s = 1:nscale
wavelength = minwavelength*mult^(s-1)
fo = 1.0/wavelength # Centre frequency of filter.
# For each element in IMG construct and apply the log Gabor filter and low-pass filter
# to produce IMGF, the bandpassed image in the frequency domain
for n in eachindex(freq)
IMGF[n] = IMG[n]*loggabor(freq[n], fo, sigmaonf)*lowpassfilter(freq[n], 0.4, 10)
end
f .= real.(ifft(IMGF)) # Bandpassed image in spatial domain.
h .= IMGF.*H # Apply monogenic filter.
ifft!(h) # real part of h contains convolution result with h1,
# imaginary part contains convolution result with h2.
# h .= ifft(IMGF.*H) # (not as fast or memory efficient)
# Now calculate the phase symmetry measure.
for n in eachindex(h)
hAmp2 = real(h[n])^2 + imag(h[n])^2 # Squared amplitude of h1 h2 filter results
sumAn[n] += sqrt(f[n]^2 + hAmp2) # Magnitude of Energy.
if polarity == 0 # look for 'white' and 'black' spots
symmetryEnergy[n] += abs(f[n]) - sqrt(hAmp2)
elseif polarity == 1 # Just look for 'white' spots
symmetryEnergy[n] += f[n] - sqrt(hAmp2)
elseif polarity == -1 # Just look for 'black' spots
symmetryEnergy[n] += (-f[n] - sqrt(hAmp2))
end
end
# At the smallest scale estimate noise characteristics from the
# distribution of the filter amplitude responses stored in sumAn.
# tau is the Rayleigh parameter that is used to specify the
# distribution.
if s == 1
if abs(noisemethod + 1) < epsilon # Use median to estimate noise statistics
tau = median(sumAn)/sqrt(log(4))
elseif abs(noisemethod + 2) < epsilon # Use mode to estimate noise statistics
tau = rayleighmode(sumAn)
end
end
end # For each scale
# Compensate for noise
#
# Assuming the noise is Gaussian the response of the filters to noise will
# form Rayleigh distribution. We use the filter responses at the smallest
# scale as a guide to the underlying noise level because the smallest scale
# filters spend most of their time responding to noise, and only
# occasionally responding to features. Either the median, or the mode, of
# the distribution of filter responses can be used as a robust statistic to
# estimate the distribution mean and standard deviation as these are related
# to the median or mode by fixed constants. The response of the larger
# scale filters to noise can then be estimated from the smallest scale
# filter response according to their relative bandwidths.
#
# This code assumes that the expected response to noise on the phase symmetry
# calculation is simply the sum of the expected noise responses of each of
# the filters. This is a simplistic overestimate, however these two
# quantities should be related by some constant that will depend on the
# filter bank being used. Appropriate tuning of the parameter 'k' will
# allow you to produce the desired output. (though the value of k seems to
# be not at all critical)
if noisemethod >= 0 # We are using a fixed noise threshold
T = noisemethod # use supplied noiseMethod value as the threshold
else
# Estimate the effect of noise on the sum of the filter responses as
# the sum of estimated individual responses (this is a simplistic
# overestimate). As the estimated noise response at successive scales
# is scaled inversely proportional to bandwidth we have a simple
# geometric sum.
totalTau = tau * (1 - (1/mult)^nscale)/(1-(1/mult))
# Calculate mean and std dev from tau using fixed relationship
# between these parameters and tau. See
# http://mathworld.wolfram.com/RayleighDistribution.html
EstNoiseEnergyMean = totalTau*sqrt(pi/2) # Expected mean and std
EstNoiseEnergySigma = totalTau*sqrt((4-pi)/2) # values of noise energy
# Noise threshold, make sure it is not less than epsilon
T = max(EstNoiseEnergyMean + k*EstNoiseEnergySigma, epsilon)
end
# Apply noise threshold - effectively wavelet denoising soft thresholding
# and normalize symmetryEnergy by the sumAn to obtain phase symmetry.
# Note the max operation is not necessary if you are after speed, it is
# just 'tidy' not having -ve symmetry values
phSym = max.(symmetryEnergy .- T, 0) ./ (sumAn .+ epsilon)
return phSym, symmetryEnergy, T
end
# Version for an array of Gray elements
function phasesymmono(img::AbstractArray{T1,2}; nscale::Integer = 5, minwavelength::Real = 3,
mult::Real = 2.1, sigmaonf::Real = 0.55, k::Real = 2.0,
polarity::Integer = 0, noisemethod::Real = -1) where T1 <: Gray
fimg = Float64.(img)
return phasesymmono(fimg; nscale=nscale, minwavelength= minwavelength,
mult=mult, sigmaonf=sigmaonf, k=k,
polarity=polarity, noisemethod=noisemethod)
end
#------------------------------------------------------------------
# monofilt
"""
Apply monogenic filters to an image to obtain 2D analytic signal.
This is an implementation of Felsberg's monogenic filters
```
Usage: (f, h1f, h2f, A, theta, psi) =
monofilt(img, nscale, minWaveLength, mult, sigmaOnf, orientWrap)
(suggested: nscale = 3, minWaveLength = 4, mult = 2, sigmaOnf = 0.65, orientWrap = true/false)
Arguments:
The convolutions are done via the FFT. Many of the parameters relate
to the specification of the filters in the frequency plane.
Variable Suggested Description
name value
----------------------------------------------------------
img Image to be convolved. An Array of Real or Gray.
nscale = 3 Number of filter scales.
minWaveLength = 4 Wavelength of smallest scale filter.
mult = 2 Scaling factor between successive filters.
sigmaonf = 0.65 Ratio of the standard deviation of the
Gaussian describing the log Gabor filter's
transfer function in the frequency domain
to the filter center frequency.
orientWrap false Optional Boolean flag to turn on/off
'wrapping' of orientation data from a
range of -pi .. pi to the range 0 .. pi.
This affects the interpretation of the
phase angle - see note below. Defaults to false.
Returns:
f - vector of bandpass filter responses with respect to scale.
h1f - vector of bandpass h1 filter responses wrt scale.
h2f - vector of bandpass h2 filter responses.
A - vector of monogenic energy responses.
theta - vector of phase orientation responses.
psi - vector of phase angle responses.
```
If `orientWrap` is true `theta` will be returned in the range `0 .. pi`.
Experimentation with `sigmaonf` can be useful depending on your application.
I have found values as low as 0.2 (a filter with a *very* large bandwidth)
to be useful on some occasions.
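A minimal usage sketch with the suggested parameter values:
```
(f, h1f, h2f, A, theta, psi) = monofilt(img, 3, 4, 2, 0.65)
# A[s] is the local energy at scale s, theta[s] the local orientation
# and psi[s] the local phase
```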
See also: [`gaborconvolve`](@ref)
"""
function monofilt(img::AbstractArray{T1,2}, nscale::Integer, minWaveLength::Real, mult::Real,
sigmaOnf::Real, orientWrap::Bool = false) where T1 <: Real
#=
References:
Michael Felsberg and Gerald Sommer. "A New Extension of Linear Signal
Processing for Estimating Local Properties and Detecting Features"
DAGM Symposium 2000, Kiel
Michael Felsberg and Gerald Sommer. "The Monogenic Signal" IEEE
Transactions on Signal Processing, 49(12):3136-3144, December 2001
=#
(rows,cols) = size(img)
IMG = fft(img)
# Generate filters
(H1, H2, freq) = monogenicfilters(rows,cols)
# The two monogenic filters H1 and H2 are oriented in frequency space
# but are not selective in terms of the magnitudes of the
# frequencies. The code below generates bandpass log-Gabor filters
# which are point-wise multiplied by H1 and H2 to produce different
# bandpass versions of H1 and H2
psi = Array{Array{Float64,2}}(undef, nscale)
theta = Array{Array{Float64,2}}(undef, nscale)
A = Array{Array{Float64,2}}(undef, nscale)
f = Array{Array{Float64,2}}(undef, nscale)
h1f = Array{Array{Float64,2}}(undef, nscale)
h2f = Array{Array{Float64,2}}(undef, nscale)
H1s = zeros(ComplexF64, rows, cols)
H2s = zeros(ComplexF64, rows, cols)
logGabor = zeros(rows, cols)
for s = 1:nscale
wavelength = minWaveLength*mult^(s-1)
fo = 1.0/wavelength # Centre frequency of filter.
@. logGabor = loggabor(freq, fo, sigmaOnf)
# Generate bandpass versions of H1 and H2 at this scale
H1s .= H1.*logGabor
H2s .= H2.*logGabor
# Apply filters to image in the frequency domain and get spatial
# results
f[s] = real.(ifft(IMG.*logGabor))
h1f[s] = real.(ifft(IMG.*H1s))
h2f[s] = real.(ifft(IMG.*H2s))
A[s] = sqrt.(f[s].^2 .+ h1f[s].^2 .+ h2f[s].^2) # Magnitude of Energy.
theta[s] = atan.(h2f[s], h1f[s]) # Orientation.
# Here phase is measured relative to the h1f-h2f plane as an
# 'elevation' angle that ranges over +- pi/2
psi[s] = atan.(f[s], sqrt.(h1f[s].^2 .+ h2f[s].^2))
if orientWrap # Wrap orientation values back into the range 0-pi
theta[s][theta[s] .< 0] .+= pi
end
end
return f, h1f, h2f, A, theta, psi
end
# Version for an array of Gray elements
function monofilt(img::AbstractArray{T1,2}, nscale::Integer, minWaveLength::Real, mult::Real,
sigmaOnf::Real, orientWrap::Bool = false) where T1 <: Gray
fimg = Float64.(img)
return monofilt(fimg, nscale, minWaveLength, mult, sigmaOnf, orientWrap)
end
#------------------------------------------------------------------
# gaborconvolve
"""
Convolve an image with a bank of log-Gabor filters.
```
Usage: (EO, BP) = gaborconvolve(img, nscale, norient, minWaveLength, mult,
sigmaOnf, dThetaOnSigma, Lnorm)
Arguments:
The convolutions are done via the FFT. Many of the parameters relate
to the specification of the filters in the frequency plane.
Variable Suggested Description
name value
----------------------------------------------------------
img Image to be convolved.
nscale = 4 Number of wavelet scales.
norient = 6 Number of filter orientations.
minWaveLength = 3 Wavelength of smallest scale filter.
mult = 1.7 Scaling factor between successive filters.
sigmaOnf = 0.65 Ratio of the standard deviation of the
Gaussian describing the log Gabor filter's
transfer function in the frequency domain
to the filter center frequency.
dThetaOnSigma = 1.3 Ratio of angular interval between filter
orientations and the standard deviation of
the angular Gaussian function used to
construct filters in the freq. plane.
Lnorm 0 Optional integer indicating what norm the
filters should be normalized to. A value of 1
will produce filters with the same L1 norm, 2
will produce filters with matching L2
norm. The default value of 0 results in no
normalization (the filters have unit height
Gaussian transfer functions on a log frequency
scale)
Returns:
EO - 2D array of arrays of complex valued convolution results
EO[s,o] = convolution result for scale s and orientation o.
The real part is the result of convolving with the even
symmetric filter, the imaginary part is the result from
convolution with the odd symmetric filter.
Hence:
abs.(EO[s,o]) returns the magnitude of the convolution over the
image at scale s and orientation o.
angle.(EO[s,o]) returns the phase angles.
BP - Array of bandpass images corresponding to each scale s.
```
Notes on filter settings to obtain even coverage of the spectrum energy
```
dThetaOnSigma 1.2 - 1.3
sigmaOnf .90 mult 1.15
sigmaOnf .85 mult 1.2
sigmaOnf .75 mult 1.4 (bandwidth ~1 octave)
sigmaOnf .65 mult 1.7
sigmaOnf .55 mult 2.2 (bandwidth ~2 octaves)
```
The determination of `mult` given `sigmaOnf` is entirely empirical. What I do is
plot out the sum of the squared filter amplitudes in the frequency domain and
see how even the coverage of the spectrum is. If there are concentric 'gaps'
in the spectrum one needs to reduce `mult` and/or reduce `sigmaOnf` (which
increases filter bandwidth). If there are 'gaps' radiating outwards then one
needs to reduce `dThetaOnSigma` (increasing the angular bandwidth of the filters).
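A rough way to inspect the coverage, under the assumption that convolving an
impulse image recovers the filter transfer functions (a sketch, not from the
original text):
```
delta = zeros(256, 256); delta[129, 129] = 1.0   # impulse image
(EO, _) = gaborconvolve(delta, 4, 6, 3, 1.7, 0.65, 1.3)
coverage = sum(abs2.(fft(EO[s, o])) for s in 1:4, o in 1:6)
# look for concentric or radial 'gaps' in `coverage`
```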
"""
function gaborconvolve(img::AbstractArray{T1,2}, nscale::Integer, norient::Integer, minWaveLength::Real,
mult::Real, sigmaOnf::Real, dThetaOnSigma::Real, Lnorm::Integer = 0) where T1 <:Real
#=
For details of log-Gabor filters see:
D. J. Field, "Relations Between the Statistics of Natural Images and the
Response Properties of Cortical Cells", Journal of The Optical Society of
America A, Vol 4, No. 12, December 1987. pp 2379-2394
=#
if !in(Lnorm, [0, 1, 2])
error("Lnorm must be 0 1 or 2")
end
(rows, cols) = size(img)
IMG = fft(img)
EO = Array{Array{ComplexF64,2}}(undef, nscale, norient)
BP = Array{Array{Float64,2}}(undef, nscale)
logGabor = Array{Array{Float64,2}}(undef, nscale)
filter = zeros(rows, cols)
angfilter = zeros(rows, cols)
# Generate grid data for constructing filters in the frequency domain
(freq, fx, fy) = filtergrids(rows, cols)
(sintheta, costheta) = gridangles(freq, fx, fy)
# Calculate the standard deviation of the angular Gaussian
# function used to construct filters in the freq. plane.
thetaSigma = pi/norient/dThetaOnSigma
# Filters are constructed in terms of two components.
# 1) The radial component, which controls the frequency band that the filter
# responds to
# 2) The angular component, which controls the orientation that the filter
# responds to.
# The two components are multiplied together to construct the overall filter.
# Construct the radial filter components. All log Gabor filters
# are multiplied by a large, but sharp, low-pass filter to ensure
# no extra frequencies at the 'corners' of the FFT are
# incorporated. This keeps the overall norm of each filter not too
# dissimilar.
for s = 1:nscale
wavelength = minWaveLength*mult^(s-1)
fo = 1.0/wavelength # Centre frequency of filter.
# Construct the log Gabor filter and apply the low-pass filter
logGabor[s] = zeros(rows,cols)
for n in eachindex(freq)
logGabor[s][n] = loggabor(freq[n], fo, sigmaOnf)*lowpassfilter(freq[n], 0.45, 15)
end
# Compute bandpass image for each scale
if Lnorm == 2 # Normalize filters to have same L2 norm
L = sqrt.(sum(logGabor[s].^2))
elseif Lnorm == 1 # Normalize to have same L1
L = sum(abs.(real.(ifft(logGabor[s]))))
elseif Lnorm == 0 # No normalization
L = 1
end
logGabor[s] ./= L
BP[s] = real.(ifft(IMG .* logGabor[s]))
end
# The main loop...
for o = 1:norient # For each orientation.
# Construct the angular filter
angl = (o-1)*pi/norient # Filter angle.
angfilter .= gaussianangularfilter(angl, thetaSigma, sintheta, costheta)
wavelength = minWaveLength # Initialize filter wavelength.
for s = 1:nscale # For each scale.
# Multiply by the angular filter to get the overall filter
@. filter = logGabor[s] * angfilter
if Lnorm == 2 # Normalize filters to have the same L2 norm (** Why sqrt(2)?)
L = sqrt.(sum(real.(filter).^2 + imag.(filter).^2 ))/sqrt(2)
elseif Lnorm == 1 # Normalize to have same L1
L = sum(abs.(real.(ifft(filter))))
elseif Lnorm == 0 # No normalization
L = 1
end
filter ./= L
# Do the convolution, back transform, and save the result in EO
EO[s,o] = ifft(IMG .* filter)
wavelength = wavelength * mult # Wavelength of next filter
end # ... and process the next scale
end # For each orientation
return EO, BP
end
# Version for an array of Gray elements
function gaborconvolve(img::AbstractArray{T1,2}, nscale::Integer, norient::Integer, minWaveLength::Real,
mult::Real, sigmaOnf::Real, dThetaOnSigma::Real, Lnorm::Integer = 0) where T1 <: Gray
fimg = Float64.(img)
return gaborconvolve(fimg, nscale, norient, minWaveLength, mult, sigmaOnf, dThetaOnSigma, Lnorm)
end
#------------------------------------------------------------------
# phasecong3
"""
Computes edge and corner phase congruency in an image via log-Gabor filters.
There are potentially many arguments, here is the full usage:
```
(M, m, or, ft, EO, T) = phasecong3(img; nscale, norient, minwavelength,
mult, sigmaonf, k, cutoff, g, noisemethod)
```
However, apart from the image, all parameters have defaults and the
usage can be as simple as:
```
(M,) = phasecong3(img)
Keyword Arguments:
Default values Description
nscale 4 - Number of wavelet scales, try values 3-6
norient 6 - Number of filter orientations.
minwavelength 3 - Wavelength of smallest scale filter.
mult 2.1 - Scaling factor between successive filters.
sigmaonf 0.55 - Ratio of the standard deviation of the Gaussian
describing the log Gabor filter's transfer function
in the frequency domain to the filter center frequency.
k 2.0 - No of standard deviations of the noise energy beyond
the mean at which we set the noise threshold point.
You may want to vary this up to a value of 10 or
20 for noisy images
cutoff 0.5 - The fractional measure of frequency spread
below which phase congruency values get penalized.
g 10 - Controls the sharpness of the transition in
the sigmoid function used to weight phase
congruency for frequency spread.
noisemethod -1 - Parameter specifies method used to determine
noise statistics.
-1 use median of smallest scale filter responses
-2 use mode of smallest scale filter responses
0+ use noisemethod value as the fixed noise threshold
Returned values:
M - Maximum moment of phase congruency covariance.
This is used as an indicator of edge strength.
m - Minimum moment of phase congruency covariance.
This is used as an indicator of corner strength.
or - Orientation image in radians -pi/2 to pi/2, +ve anticlockwise.
0 corresponds to a vertical edge, pi/2 is horizontal.
ft - Local weighted mean phase angle at every point in the
image. A value of pi/2 corresponds to a bright line, 0
corresponds to a step and -pi/2 is a dark line.
EO - A 2D array of complex valued convolution results for each scale
and orientation
T - Calculated noise threshold (can be useful for
diagnosing noise characteristics of images). Once you know
this you can then specify fixed thresholds and save some
computation time.
```
`EO[s,o]` = convolution result for scale `s` and orientation `o`. The real part
is the result of convolving with the even symmetric filter, the imaginary
part is the result from convolution with the odd symmetric filter.
Hence:
`abs.(EO[s,o])` returns the magnitude of the convolution over the
image at scale `s` and orientation `o`,
`angle.(EO[s,o])` returns the phase angles.
The convolutions are done via the FFT. Many of the parameters relate to the
specification of the filters in the frequency plane. The values do not seem
to be very critical and the defaults are usually fine. You may want to
experiment with the values of `nscale` and `k`, the noise compensation factor.
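For instance, a minimal edge/corner sketch (the thresholds are illustrative):
```
(M, m) = phasecong3(img)
edges   = M .> 0.3   # edge strength map, thresholded
corners = m .> 0.3   # corner strength map, thresholded
```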
Some filter parameters to obtain even coverage of the spectrum
```
sigmaonf .85 mult 1.3
sigmaonf .75 mult 1.6 (filter bandwidth ~1 octave)
sigmaonf .65 mult 2.1
sigmaonf .55 mult 3 (filter bandwidth ~2 octaves)
```
See also: [`phasesym`](@ref), [`gaborconvolve`](@ref)
"""
function phasecong3(img::AbstractArray{T1,2}; nscale::Integer = 4, norient::Integer = 6,
minwavelength::Real = 3, mult::Real = 2.1, sigmaonf::Real = 0.55,
k::Real = 2, cutoff::Real = 0.5, g::Real = 10,
noisemethod::Real = -1) where T1 <: Real
#=
References:
Peter Kovesi, "Image Features From Phase Congruency". Videre: A
Journal of Computer Vision Research. MIT Press. Volume 1, Number 3,
Summer 1999 http://www.cs.rochester.edu/u/brown/Videre/001/v13.html
Peter Kovesi, "Phase Congruency Detects Corners and
Edges". Proceedings DICTA 2003, Sydney Dec 10-12. IEEE Xplore
Preprint: http://www.peterkovesi.com/papers/phasecorners.pdf
=#
# To Do: Extra sanity checks on arguments
epsilon = 1e-5 # Used to prevent division by zero.
(rows,cols) = size(img)
IMG = fft(img)
# A massive set of buffer matrices...
logGabor = Array{Array{Float64,2}}(undef, nscale)
filter = zeros(rows, cols)
EO = Array{Array{ComplexF64,2}}(undef, nscale, norient) # Array of convolution results.
EnergyV = zeros(rows,cols,3) # Total energy vector, used for
# feature orientation and type
# calculation
covx2 = zeros(rows,cols) # Matrices for covariance data
covy2 = zeros(rows,cols)
covxy = zeros(rows,cols)
# Various arrays for the computation of phase congruency at each orientation
sumE_ThisOrient = zeros(rows,cols)
sumO_ThisOrient = zeros(rows,cols)
sumAn_ThisOrient = zeros(rows,cols)
Energy = zeros(rows,cols)
MeanE = zeros(rows,cols)
MeanO = zeros(rows,cols)
An = zeros(rows,cols)
maxAn = zeros(rows,cols)
M = zeros(rows,cols) # Output: max and min moments of covariance
m = zeros(rows,cols)
T = 0.0 # Needed in main scope
tau = 0.0
# Generate grid data for constructing filters in the frequency domain
(freq, fx, fy) = filtergrids(rows, cols)
(sintheta, costheta) = gridangles(freq, fx, fy)
# Filters are constructed in terms of two components.
# 1) The radial component, which controls the frequency band that the filter
# responds to
# 2) The angular component, which controls the orientation that the filter
# responds to.
# The two components are multiplied together to construct the overall filter.
# Construct the radial filter components. All log Gabor filters
# are multiplied by a large, but sharp, low-pass filter to ensure
# no extra frequencies at the 'corners' of the FFT are
# incorporated, as this seems to upset the normalisation process
# when calculating phase congruency.
for s = 1:nscale
wavelength = minwavelength*mult^(s-1)
fo = 1.0/wavelength # Centre frequency of filter.
# Construct the log Gabor filter and apply the low-pass filter
logGabor[s] = zeros(rows,cols)
for n in eachindex(freq)
logGabor[s][n] = loggabor(freq[n], fo, sigmaonf)*lowpassfilter(freq[n], 0.45, 15)
end
end
## The main loop...
for o = 1:norient # For each orientation...
# Construct the angular filter function
angl = (o-1)*pi/norient # Filter angle.
wavelen = 4*pi/norient # Desired wavelength of cosine window function
angfilter = cosineangularfilter(angl, wavelen, sintheta, costheta)
sumE_ThisOrient .= 0 # Initialize accumulator matrices.
sumO_ThisOrient .= 0
sumAn_ThisOrient .= 0
Energy .= 0
for s = 1:nscale # For each scale...
filter .= logGabor[s] .* angfilter # Multiply radial and angular
# components to get the filter.
# Convolve image with even and odd filters returning the result in EO
EO[s,o] = ifft(IMG .* filter)
An .= abs.(EO[s,o]) # Amplitude of even & odd filter response.
sumAn_ThisOrient .+= An # Sum of amplitude responses.
sumE_ThisOrient .+= real.(EO[s,o]) # Sum of even filter convolution results.
sumO_ThisOrient .+= imag.(EO[s,o]) # Sum of odd filter convolution results.
# At the smallest scale estimate noise characteristics from the
# distribution of the filter amplitude responses stored in sumAn.
# tau is the Rayleigh parameter that is used to describe the
# distribution.
if s == 1
if abs(noisemethod + 1) < epsilon # Use median to estimate noise statistics
tau = median(sumAn_ThisOrient)/sqrt(log(4))
elseif abs(noisemethod + 2) < epsilon # Use mode to estimate noise statistics
tau = rayleighmode(sumAn_ThisOrient)
end
maxAn .= An
else
# Record maximum amplitude of components across scales. This is needed
# to determine the frequency spread weighting.
maxAn .= max.(maxAn,An)
end
end # ... and process the next scale
# Accumulate total 3D energy vector data, this will be used to
# determine overall feature orientation and feature phase/type
EnergyV[:,:,1] .+= sumE_ThisOrient
EnergyV[:,:,2] .+= cos(angl)*sumO_ThisOrient
EnergyV[:,:,3] .+= sin(angl)*sumO_ThisOrient
# Get weighted mean filter response vector, this gives the weighted mean
# phase angle.
for n in eachindex(Energy)
XEnergy = sqrt(sumE_ThisOrient[n]^2 + sumO_ThisOrient[n]^2) + epsilon
MeanE[n] = sumE_ThisOrient[n] / XEnergy
MeanO[n] = sumO_ThisOrient[n] / XEnergy
end
# Now calculate An(cos(phase_deviation) - | sin(phase_deviation)) | by
# using dot and cross products between the weighted mean filter response
# vector and the individual filter response vectors at each scale. This
# quantity is phase congruency multiplied by An, which we call energy.
for s = 1:nscale
for n in eachindex(Energy)
E = real(EO[s,o][n]); # Extract even and odd
O = imag(EO[s,o][n]) # convolution results.
Energy[n] += (E*MeanE[n] + O*MeanO[n] - abs(E*MeanO[n] - O*MeanE[n]))
end
end
## Automatically determine noise threshold
#
# Assuming the noise is Gaussian the response of the filters to noise will
# form Rayleigh distribution. We use the filter responses at the smallest
# scale as a guide to the underlying noise level because the smallest scale
# filters spend most of their time responding to noise, and only
# occasionally responding to features. Either the median, or the mode, of
# the distribution of filter responses can be used as a robust statistic to
# estimate the distribution mean and standard deviation as these are related
# to the median or mode by fixed constants. The response of the larger
# scale filters to noise can then be estimated from the smallest scale
# filter response according to their relative bandwidths.
#
# This code assumes that the expected response to noise on the phase congruency
# calculation is simply the sum of the expected noise responses of each of
# the filters. This is a simplistic overestimate, however these two
# quantities should be related by some constant that will depend on the
# filter bank being used. Appropriate tuning of the parameter 'k' will
# allow you to produce the desired output.
if noisemethod >= 0 # We are using a fixed noise threshold
T = noisemethod # use supplied noiseMethod value as the threshold
else
# Estimate the effect of noise on the sum of the filter responses as
# the sum of estimated individual responses (this is a simplistic
# overestimate). As the estimated noise response at successive scales
# is scaled inversely proportional to bandwidth we have a simple
# geometric sum.
totalTau = tau * (1 - (1/mult)^nscale)/(1-(1/mult))
# Calculate mean and std dev from tau using fixed relationship
# between these parameters and tau. See
# http://mathworld.wolfram.com/RayleighDistribution.html
EstNoiseEnergyMean = totalTau*sqrt(pi/2) # Expected mean and std
EstNoiseEnergySigma = totalTau*sqrt((4-pi)/2) # values of noise energy
T = EstNoiseEnergyMean + k*EstNoiseEnergySigma # Noise threshold
end
# Apply noise threshold, this is effectively wavelet denoising via
# soft thresholding.
@. Energy = max(Energy - T, 0)
for n in eachindex(Energy)
# Form weighting that penalizes frequency distributions
# that are particularly narrow. Calculate fractional
# 'width' of the frequencies present by taking the sum of
# the filter response amplitudes and dividing by the
# maximum amplitude at each point on the image. If there
# is only one non-zero component width takes on a value of
# 0, if all components are equal width is 1.
width = (sumAn_ThisOrient[n]/(maxAn[n] + epsilon) - 1) / (nscale-1)
# The sigmoidal weighting function for this orientation given the 'width'
weight = 1.0 / (1 + exp((cutoff - width)*g))
# Apply weighting to energy and then calculate phase congruency
PCo = weight*Energy[n]/sumAn_ThisOrient[n]
# Build up covariance data for every point
covx = PCo*cos(angl)
covy = PCo*sin(angl)
covx2[n] += covx^2
covy2[n] += covy^2
covxy[n] += covx*covy
end
end # For each orientation
## Edge and Corner calculations
# The following code calculates the principal vector of the phase
# congruency covariance data and calculates the minimum and
# maximum moments - these correspond to the singular values.
for n in eachindex(Energy)
# First normalise covariance values by the number of orientations/2
covx2[n] /= (norient/2)
covy2[n] /= (norient/2)
covxy[n] *= 4/norient # This gives us 2*covxy/(norient/2)
denom = sqrt(covxy[n]^2 + (covx2[n]-covy2[n])^2)+epsilon
M[n] = (covy2[n]+covx2[n] + denom)/2 # Maximum moment
m[n] = (covy2[n]+covx2[n] - denom)/2 # ... and minimum moment
end
# Orientation and feature phase/type computation
@views or = atan.(-EnergyV[:,:,3]./EnergyV[:,:,2])
OddV = sqrt.(EnergyV[:,:,2].^2 + EnergyV[:,:,3].^2)
@views featType = atan.(EnergyV[:,:,1], OddV) # Feature phase pi/2 <-> white line,
# 0 <-> step, -pi/2 <-> black line
return M, m, or, featType, EO, T
end
# Version for an array of Gray elements
function phasecong3(img::AbstractArray{T1,2}; nscale::Integer = 4, norient::Integer = 6,
minwavelength::Real = 3, mult::Real = 2.1, sigmaonf::Real = 0.55,
k::Real = 2, cutoff::Real = 0.5, g::Real = 10,
noisemethod::Real = -1) where T1 <: Gray
fimg = Float64.(img)
return phasecong3(fimg; nscale=nscale, norient=norient,
minwavelength=minwavelength, mult=mult, sigmaonf=sigmaonf,
k=k, cutoff=cutoff, g=g, noisemethod=noisemethod)
end
#------------------------------------------------------------------
# phasesym
"""
Compute phase symmetry on an image via log-Gabor filters.
This function calculates the phase symmetry of points in an image.
This is a contrast invariant measure of symmetry. This function can be
used as a line and blob detector. The greyscale polarity of the lines
that you want to find can be specified.
```
Usage: (phSym, orientation, totalEnergy, T) =
phasesym(img; nscale = 5, norient = 6, minwavelength = 3, mult = 2.1,
sigmaonf = 0.55, k = 2, polarity = 0, noisemethod = -1)
However, apart from the image, all parameters have defaults and the
usage can be as simple as:
(phSym,) = phasesym(img)
Argument:
img - Image to be processed. 2D Array of Real or Gray
Keyword Arguments:
Default values Description
nscale 5 - Number of wavelet scales, try values 3-6
norient 6 - Number of filter orientations.
minwavelength 3 - Wavelength of smallest scale filter.
mult 2.1 - Scaling factor between successive filters.
sigmaonf 0.55 - Ratio of the standard deviation of the Gaussian
describing the log Gabor filter's transfer function
in the frequency domain to the filter center frequency.
k 2.0 - No of standard deviations of the noise energy beyond
the mean at which we set the noise threshold point.
You may want to vary this up to a value of 10 or
20 for noisy images
polarity 0 - Controls 'polarity' of symmetry features to find.
1 - just return 'bright' points
-1 - just return 'dark' points
0 - return bright and dark points.
noisemethod -1 - Parameter specifies method used to determine
noise statistics.
-1 use median of smallest scale filter responses
-2 use mode of smallest scale filter responses
0+ use noiseMethod value as the fixed noise threshold.
Return values:
phSym - Phase symmetry image (values between 0 and 1).
orientation - Orientation image. Orientation in which local
symmetry energy is a maximum, in radians
(-pi/2 - pi/2), angles positive anti-clockwise. Note
the orientation info is quantized by the number
of orientations
totalEnergy - Un-normalised raw symmetry energy which may be
more to your liking.
T - Calculated noise threshold (can be useful for
diagnosing noise characteristics of images). Once you know
this you can then specify fixed thresholds and save some
computation time.
```
The convolutions are done via the FFT. Many of the parameters relate to the
specification of the filters in the frequency plane. The values do not seem
to be very critical and the defaults are usually fine. You may want to
experiment with the values of `nscale` and `k`, the noise compensation factor.
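For example (a sketch; the parameter choices are illustrative):
```
(phSym, orientation) = phasesym(img; nscale = 4, polarity = 1)
# phSym highlights bright line and blob structures
```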
Notes on filter settings to obtain even coverage of the spectrum
```
sigmaonf .85 mult 1.3
sigmaonf .75 mult 1.6 (filter bandwidth ~1 octave)
sigmaonf .65 mult 2.1
sigmaonf .55 mult 3 (filter bandwidth ~2 octaves)
```
See also: [`phasesymmono`](@ref), [`phasecong3`](@ref)
"""
function phasesym(img::AbstractArray{T1,2}; nscale::Integer = 5, norient::Integer = 6,
minwavelength::Real = 3, mult::Real = 2.1, sigmaonf::Real = 0.55,
k::Real = 2.0, polarity::Integer = 0, noisemethod::Real = -1) where T1 <: Real
#=
References:
Peter Kovesi, "Symmetry and Asymmetry From Local Phase" AI'97, Tenth
Australian Joint Conference on Artificial Intelligence. 2 - 4 December
1997. http://www.peterkovesi.com/papers/ai97.pdf
Peter Kovesi, "Image Features From Phase Congruency". Videre: A
Journal of Computer Vision Research. MIT Press. Volume 1, Number 3,
Summer 1999 http://www.cs.rochester.edu/u/brown/Videre/001/v13.html
=#
epsilon = 1e-4 # Used to prevent division by zero.
(rows,cols) = size(img)
IMG = fft(img)
logGabor = Array{Array{Float64,2}}(undef, nscale)
filter = zeros(rows,cols)
totalEnergy = zeros(rows,cols) # Matrix for accumulating weighted phase
# congruency values (energy).
totalSumAn = zeros(rows,cols) # Matrix for accumulating filter response
# amplitude values.
orientation = zeros(rows,cols) # Matrix storing orientation with greatest
# energy for each pixel.
maxEnergy = zeros(rows,cols)
sumAn_ThisOrient = zeros(rows,cols)
Energy_ThisOrient = zeros(rows,cols)
An = zeros(rows,cols)
EO = zeros(ComplexF64, rows,cols)
tau = 0.0
T = 0.0 # Need in main scope
# Generate grid data for constructing filters in the frequency domain
(freq, fx, fy) = filtergrids(rows, cols)
(sintheta, costheta) = gridangles(freq, fx, fy)
# Filters are constructed in terms of two components.
# 1) The radial component, which controls the frequency band that the filter
# responds to
# 2) The angular component, which controls the orientation that the filter
# responds to.
# The two components are multiplied together to construct the overall filter.
# Construct the radial filter components. All log Gabor filters
# are multiplied by a large, but sharp, low-pass filter to ensure
# no extra frequencies at the 'corners' of the FFT are
# incorporated, as this seems to upset the normalisation process
# when calculating phase congruency.
for s = 1:nscale
wavelength = minwavelength*mult^(s-1)
fo = 1.0/wavelength # Centre frequency of filter.
# Construct the log Gabor filter and apply the low-pass filter
logGabor[s] = zeros(rows,cols)
for n in eachindex(freq)
logGabor[s][n] = loggabor(freq[n], fo, sigmaonf)*lowpassfilter(freq[n], 0.45, 15)
end
end
## The main loop...
for o = 1:norient # For each orientation....
# Construct the angular filter
angl = (o-1)*pi/norient # Filter angle.
wavelen = 4*pi/norient # Desired wavelength of cosine window function
angfilter = cosineangularfilter(angl, wavelen, sintheta, costheta)
sumAn_ThisOrient .= 0
Energy_ThisOrient .= 0
for s = 1:nscale # For each scale....
filter .= logGabor[s] .* angfilter # Multiply radial and angular
# components to get filter.
# Convolve image with even and odd filters returning the result in EO
EO .= ifft(IMG .* filter)
An .= abs.(EO) # Amplitude of even & odd filter response.
sumAn_ThisOrient .+= An # Sum of amplitude responses.
# At the smallest scale estimate noise characteristics from the
# distribution of the filter amplitude responses stored in sumAn.
# tau is the Rayleigh parameter that is used to describe the
# distribution.
if s == 1
if abs(noisemethod + 1) < epsilon # Use median to estimate noise statistics
tau = median(sumAn_ThisOrient)/sqrt(log(4))
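                    # (For a Rayleigh distribution the median equals
                    #  sigma*sqrt(log(4)), so dividing the median by
                    #  sqrt(log(4)) recovers the Rayleigh parameter.)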
elseif abs(noisemethod + 2) < epsilon # Use mode to estimate noise statistics
tau = rayleighmode(sumAn_ThisOrient)
end
end
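            # The real part of EO is the even-symmetric (cosine) filter
            # response and the imaginary part the odd-symmetric (sine)
            # response. Points of symmetry have a large even response
            # and a small odd response.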
# Now calculate the phase symmetry measure.
            if polarity == 0      # Look for 'white' and 'black' spots
                Energy_ThisOrient .+= abs.(real.(EO)) .- abs.(imag.(EO))
            elseif polarity == 1  # Just look for 'white' spots
                Energy_ThisOrient .+= real.(EO) .- abs.(imag.(EO))
            elseif polarity == -1 # Just look for 'black' spots
                Energy_ThisOrient .+= -real.(EO) .- abs.(imag.(EO))
            end
end # ... and process the next scale
## Automatically determine noise threshold
#
        # Assuming the noise is Gaussian, the response of the filters to noise will
        # form a Rayleigh distribution. We use the filter responses at the smallest
# scale as a guide to the underlying noise level because the smallest scale
# filters spend most of their time responding to noise, and only
# occasionally responding to features. Either the median, or the mode, of
# the distribution of filter responses can be used as a robust statistic to
# estimate the distribution mean and standard deviation as these are related
# to the median or mode by fixed constants. The response of the larger
# scale filters to noise can then be estimated from the smallest scale
# filter response according to their relative bandwidths.
#
# This code assumes that the expected response to noise on the phase congruency
# calculation is simply the sum of the expected noise responses of each of
        # the filters. This is a simplistic overestimate; however, these two
# quantities should be related by some constant that will depend on the
# filter bank being used. Appropriate tuning of the parameter 'k' will
# allow you to produce the desired output.
if noisemethod >= 0 # We are using a fixed noise threshold
        T = noisemethod # Use the supplied noisemethod value as the threshold
else
# Estimate the effect of noise on the sum of the filter responses as
# the sum of estimated individual responses (this is a simplistic
# overestimate). As the estimated noise response at successive scales
# is scaled inversely proportional to bandwidth we have a simple
# geometric sum.
totalTau = tau * (1 - (1/mult)^nscale)/(1-(1/mult))
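            # (With the defaults mult = 2.1 and nscale = 5 this sum is
            #  1 + 1/2.1 + ... + 1/2.1^4 ≈ 1.86, so totalTau is roughly
            #  1.9 times tau.)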
# Calculate mean and std dev from tau using fixed relationship
# between these parameters and tau. See
# http://mathworld.wolfram.com/RayleighDistribution.html
EstNoiseEnergyMean = totalTau*sqrt(pi/2) # Expected mean and std
EstNoiseEnergySigma = totalTau*sqrt((4-pi)/2) # values of noise energy
# Noise threshold, make sure it is not less than epsilon.
T = max(EstNoiseEnergyMean + k*EstNoiseEnergySigma, epsilon)
end
# Apply noise threshold, this is effectively wavelet denoising via
# soft thresholding. Note 'Energy_ThisOrient' will have -ve values.
# These will be floored out at the final normalization stage.
Energy_ThisOrient .-= T
# Update accumulator matrix for sumAn and totalEnergy
totalSumAn .+= sumAn_ThisOrient
totalEnergy .+= Energy_ThisOrient
# Update orientation matrix by finding image points where the
# energy in this orientation is greater than in any previous
# orientation and then replacing these elements in the
# orientation matrix with the current orientation number.
if o == 1
maxEnergy .= Energy_ThisOrient
else
for n in eachindex(maxEnergy)
if Energy_ThisOrient[n] > maxEnergy[n]
orientation[n] = o - 1
maxEnergy[n] = Energy_ThisOrient[n]
end
end
end
end # For each orientation
# Normalize totalEnergy by the totalSumAn to obtain phase symmetry
# totalEnergy is floored at 0 to eliminate -ve values
phSym = max.(totalEnergy, 0) ./ (totalSumAn .+ epsilon)
# Convert orientation values to radians and offset to suit thin_edges_nonmaxsup()
    orientation .= orientation .* (pi/norient) .- pi/2
return phSym, orientation, totalEnergy, T
end
# Version for an array of Gray elements
function phasesym(img::AbstractArray{T1,2}; nscale::Integer = 5, norient::Integer = 6,
minwavelength::Real = 3, mult::Real = 2.1, sigmaonf::Real = 0.55,
k::Real = 2.0, polarity::Integer = 0, noisemethod::Real = -1) where T1 <: Gray
fimg = Float64.(img)
return phasesym(fimg; nscale=nscale, norient=norient,
minwavelength=minwavelength, mult=mult, sigmaonf=sigmaonf,
k=k, polarity=polarity, noisemethod=noisemethod)
end
#------------------------------------------------------------------
# ppdenoise
"""
Phase preserving wavelet image denoising.
```
Usage: cleanimage = ppdenoise(img, nscale = 5, norient = 6,
mult = 2.5, minwavelength = 2, sigmaonf = 0.55,
dthetaonsigma = 1.0, k = 3, softness = 1.0)
Argument:
img - Image to be processed (greyscale)
Keyword arguments:
nscale - No of filter scales to use (5-7) - the more scales used
the more low frequencies are covered.
norient - No of orientations to use (6)
mult - Multiplying factor between successive scales (2.5-3)
minwavelength - Wavelength of smallest scale filter (2)
sigmaonf - Ratio of the standard deviation of the Gaussian
describing the log Gabor filter's transfer function
in the frequency domain to the filter center frequency (0.55)
dthetaonsigma - Ratio of angular interval between filter orientations
and the standard deviation of the angular Gaussian (1)
function used to construct filters in the freq. plane.
   k             - No of standard deviations of noise to reject (2-3)
softness - Degree of soft thresholding (0-hard to 1-soft)
```
The convolutions are done via the FFT. Many of the parameters relate
to the specification of the filters in the frequency plane. Most
arguments do not need to be changed from the defaults and are mostly
not that critical. The main parameter that you may wish to play with
is `k`, the number of standard deviations of noise to reject.
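
A minimal usage sketch (the synthetic noisy image constructed here is
an assumption; any greyscale array works):
```
using TestImages
img = Float64.(testimage("mandril_gray"))
noisyimg = img .+ 0.2 .* randn(size(img))
cleanimage = ppdenoise(noisyimg; nscale = 6, k = 3, softness = 1.0)
```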
"""
function ppdenoise(img::AbstractArray{T1,2}; nscale::Integer=5, norient::Integer=6,
mult::Real=2.5, minwavelength::Real = 2, sigmaonf::Real = 0.55,
dthetaonsigma::Real = 1.0, k::Real=3, softness::Real=1.0) where T1 <: Real
#=
Reference:
Peter Kovesi, "Phase Preserving Denoising of Images".
The Australian Pattern Recognition Society Conference: DICTA'99.
December 1999. Perth WA. pp 212-217
http://www.peterkovesi.com/papers/denoise.pdf
=#
# ** Should try a version of this code using monogenic filters **
epsilon = 1e-5 # Used to prevent division by zero.
thetaSigma = pi/norient/dthetaonsigma # Calculate the standard deviation of the
# angular Gaussian function used to
# construct filters in the freq. plane.
(rows,cols) = size(img)
IMG = fft(img)
# Generate grid data for constructing filters in the frequency domain
(freq, fx, fy) = filtergrids(rows,cols)
(sintheta, costheta) = gridangles(freq, fx, fy)
    totalEnergy = zeros(ComplexF64,rows,cols) # Accumulated response over all orientations.
filter = zeros(rows,cols)
angfilter = zeros(rows,cols)
EO = zeros(ComplexF64, rows,cols)
aEO = zeros(rows,cols)
    RayMean = 0.0; RayVar = 0.0 # Needed in main scope
for o = 1:norient # For each orientation.
angl = (o-1)*pi/norient # Calculate filter angle.
# Generate angular filter
angfilter = gaussianangularfilter(angl, thetaSigma, sintheta, costheta)
wavelength = minwavelength # Initialize filter wavelength.
for s = 1:nscale
# Construct the filter = logGabor filter * angular filter
fo = 1.0/wavelength
for n in eachindex(freq)
filter[n] = loggabor(freq[n], fo, sigmaonf) * angfilter[n]
end
            # Convolve image with even and odd filters returning the result in EO
EO .= ifft(IMG .* filter)
aEO .= abs.(EO)
if s == 1
# Estimate the mean and variance in the amplitude
# response of the smallest scale filter pair at this
# orientation. If the noise is Gaussian the amplitude
# response will have a Rayleigh distribution. We
# calculate the median amplitude response as this is a
# robust statistic. From this we estimate the mean
# and variance of the Rayleigh distribution
RayMean = median(aEO) * 0.5 * sqrt(-pi/log(0.5))
                RayVar = (4 - pi) * RayMean^2 / pi
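                # (For a Rayleigh distribution with parameter sigma:
                #  median = sigma*sqrt(2*log(2)), mean = sigma*sqrt(pi/2)
                #  and variance = (4 - pi)/2*sigma^2, which give the two
                #  expressions above.)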
end
# Compute soft threshold noting that the effect of noise
# is inversely proportional to the filter bandwidth/centre
# frequency. (If the noise has a uniform spectrum)
T = (RayMean + k*sqrt(RayVar))/(mult^(s-1))
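            # (With the default mult = 2.5 the threshold at scale 2 is
            #  2.5 times smaller than at scale 1, 6.25 times smaller at
            #  scale 3, and so on.)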
for n in eachindex(aEO)
if aEO[n] > T
# Complex noise vector to subtract = T * normalize(EO)
# times degree of 'softness'
V = softness*T*EO[n]/(aEO[n] + epsilon)
EO[n] -= V # Subtract noise vector.
totalEnergy[n] += EO[n]
# else
# aEO is less than T so this component makes no contribution to totalEnergy
end
end
wavelength *= mult # Wavelength of next filter
end # for each scale
end # for each orientation
return real.(totalEnergy)
end
# Version for an array of Gray elements
function ppdenoise(img::AbstractArray{T1,2}; nscale::Integer=5, norient::Integer=6,
mult::Real=2.5, minwavelength::Real = 2, sigmaonf::Real = 0.55,
dthetaonsigma::Real = 1.0, k::Real=3, softness::Real=1.0) where T1 <: Gray
fimg = Float64.(img)
return ppdenoise(fimg; nscale=nscale, norient=norient,
mult=mult, minwavelength=minwavelength, sigmaonf=sigmaonf,
dthetaonsigma=dthetaonsigma, k=k, softness=softness)
end