licenses: sequencelengths 1-3
version: stringclasses (677 values)
tree_hash: stringlengths 40-40
path: stringclasses (1 value)
type: stringclasses (2 values)
size: stringlengths 2-8
text: stringlengths 25-67.1M
package_name: stringlengths 2-41
repo: stringlengths 33-86
[ "MIT" ]
0.1.3
e1032040d1be0b6a9bcfba77379c6384e301b24b
code
14262
#= A mapping is a `Dict{A, <:Integer}` with `A` the type of symbols used (usually `Char`). For example, `Dict('A' => 1, 'C' => 2, ...)`. Below is a utility function for mappings =# """ compute_mapping(s::AbstractVector{A}, ::Type{I}=Int) Return a `Dict{A, I}` mapping each symbol to its index: `Dict(c => i for (i, c) in enumerate(s))`. """ function compute_mapping(s::AbstractVector{A}, ::Type{I}=Int) where {A,I<:Integer} @assert allunique(s) "Cannot compute mapping from vector with duplicates - got $s" return Dict{A, I}(c => i for (i, c) in enumerate(s)) end function reverse_mapping(mapping::Dict{T,U}) where {T, U} return Dict{U,T}(y => x for (x, y) in mapping) end ####################################################################################### ####################################### Alphabet ###################################### ####################################################################################### """ struct Alphabet{A,I} characters::Vector{A} char_to_index::Dict{A, I} index_to_char::Dict{I, A} default_char = nothing default_index end Structure allowing the mapping from biological symbols of type `A` to integers of type `I`. The typical use case would be `Alphabet{Char, Int}`. `Alphabet` can be constructed - from a `Vector` of symbols and an optional type `I`, *e.g.* `Alphabet(['A','C','G','T'], UInt8)::Alphabet{Char, UInt8}` - from a `String` and an optional type, *e.g.* `Alphabet("ACGT")` - from a mapping `Dict{A, I}` where `I<:Integer`: `Alphabet(Dict('A'=>1, 'C'=>2))` - from a `Symbol`, using default alphabets, *e.g.* `Alphabet(:nt)` - from an integer, using default alphabets (see `?default_alphabet`). """ @kwdef struct Alphabet{A, I<:Integer} characters::Vector{A} # Alphabet characters char_to_index::Dict{A, I} = compute_mapping(characters) index_to_char::Dict{I, A} = reverse_mapping(char_to_index) default_char::Union{Nothing, A} = nothing default_index::Union{Nothing, I} = _default_index_from_char(default_char, characters) # Constructor with checks function Alphabet{A,I}( characters, char_to_index, index_to_char, default_char, default_index, ) where {A,I} @assert isconcretetype(I) && isconcretetype(A) """\ Use concrete type to build `Alphabet` - got `($A,$I)` """ if !isbitstype(A) @warn "Building alphabet using non bit-type symbols $(A). \ May lead to some issues, for instance when copying." end @assert !(A <: Integer) """\ Symbol type should not be `Integer`. Got $characters of type $A """ @assert length(characters) == length(char_to_index) == length(index_to_char) """\ Inconsistent lengths between `characters`, `char_to_index` and `index_to_char`:\ $(length(characters)) vs $(length(char_to_index)) vs $(length(index_to_char)) """ # are characters and char_to_index consistent @assert all(c -> haskey(char_to_index, c), characters) """\ Incomplete `char_to_index`: some symbols in $characters not in $char_to_index """ @assert all(x -> char_to_index[x[2]] == x[1], enumerate(characters)) """\ Inconsistency between `characters` and `char_to_index`: \ $characters -- $(char_to_index) """ # are char_to_index and index_to_char consistent @assert all(c -> haskey(char_to_index, c), values(index_to_char)) """\ Some symbols in `index_to_char` not in `char_to_index` """ @assert all(i -> haskey(index_to_char, i), values(char_to_index)) """\ Some indices in `char_to_index` not in `index_to_char` """ @assert all(c -> c == index_to_char[char_to_index[c]], keys(char_to_index)) """\ Inconsistent `char_to_index` and `index_to_char`. 
""" # are defaults ok @assert !xor(isnothing(default_char), isnothing(default_index)) """\ Got `default_char=$(default_char)` and `default_index=$(default_index)`. These should either both be `nothing`, or both something. This error can happen if the proposed `default_char` is not in the alphabet. """ @assert isnothing(default_char) || in(default_char, characters) """\ Default char $(default_char) not found in alphabet characters $(string) Maybe use `default_char = c` with `c` in your alphabet? """ @assert isnothing(default_index) || haskey(index_to_char, default_index) """\ Got `default_index=$(default_index)` \ but alphabet `characters` of length $(length(characters)) """ @assert (isnothing(default_index) && isnothing(default_char)) || index_to_char[default_index] == default_char """\ Defaults are inconsistent with mapping.\ `default_index=$(default_index)` maps to $(char_to_index[default_index]),\ but `default_char=$(default_char)` """ # Checking for duplicates @assert allunique(characters) """Cannot form alphabet from `characters` with duplicates\ - got $characters """ return new{A,I}(characters, char_to_index, index_to_char, default_char, default_index) end end _default_index_from_char(c, characters::AbstractVector) = findfirst(==(c), characters) _default_index_from_char(::Nothing, ::AbstractVector) = nothing ################################################ ############## Equality, copying ############### ################################################ function Base.:(==)(α::Alphabet{A,I}, β::Alphabet{B,J}) where {A,B,I,J} return A == B && I == J && α.characters == β.characters && α.default_char == β.default_char α.default_index == β.default_index α.index_to_char == β.index_to_char && α.char_to_index == β.char_to_index end function Base.hash(alphabet::Alphabet{A,I}, h::UInt) where {A,I} h = hash(Alphabet{A,I}, h) h = hash(alphabet.characters, h) h = hash(alphabet.index_to_char, h) h = hash(alphabet.char_to_index, h) h = hash(alphabet.default_char, h) h = hash(alphabet.default_index, h) return h end function Base.copy(alphabet::Alphabet{A,I}) where {A,I} return Alphabet{A,I}(; characters = copy(alphabet.characters), # assumes A is bit type, otherwise deepcopy char_to_index = copy(alphabet.char_to_index), index_to_char = copy(alphabet.index_to_char), default_char = alphabet.default_char, default_index = alphabet.default_index, ) end Base.convert(::Type{I}, alphabet::Alphabet{A,I}) where {A,I<:Integer} = alphabet Base.convert(::Type{Alphabet{A,I}}, alphabet::Alphabet{A,I}) where {A,I} = alphabet function Base.convert(::Type{J}, α::Alphabet{A,I}) where {A,I,J<:Integer} return Alphabet{A,J}(; characters = copy(α.characters), char_to_index = convert(Dict{A, J}, α.char_to_index), index_to_char = convert(Dict{J, A}, α.index_to_char), default_char = α.default_char, default_index = isnothing(α.default_index) ? nothing : convert(J, α.default_index) ) end function Base.convert(::Type{Alphabet{A,J}}, alphabet::Alphabet{A,I}) where {A,I,J<:Integer} return convert(J, alphabet) end ################################################ ################# Constructors ################# ################################################ function Alphabet( characters::AbstractVector{<:A}, ::Type{I}=Int; kwargs... ) where {A, I<:Integer} return Alphabet{A,I}(; characters, char_to_index = compute_mapping(characters, I), kwargs... ) end function Alphabet(S::AbstractString, ::Type{I}=Int; kwargs...) where I return Alphabet(collect(S), I; kwargs...) 
end Alphabet(A::Alphabet) = A Alphabet(alphabet::Alphabet{A,I}, ::Type{J}) where {A,I,J} = convert(J, alphabet) function Alphabet(characters::AbstractVector{A}, mapping::Dict{A,I}; kwargs...) where {A,I} return Alphabet{A,I}(; characters, char_to_index = mapping, kwargs... ) end function Alphabet(S::AbstractString, mapping::Dict{<:AbstractChar, I}; kwargs...) where I return Alphabet(collect(S), mapping; kwargs...) end function Alphabet(char_to_index::AbstractDict{A, I}; kwargs...) where {A,I} # cannot do collect(keys(...)) because of undefined ordering of Dict characters = Vector{A}(undef, length(char_to_index)) for (c, i) in char_to_index characters[i] = c end return Alphabet{A,I}(; characters, char_to_index, kwargs...) end ################################################ ################### Defaults ################### ################################################ const _DEFAULT_AA_ALPHABET_STRING = "-ACDEFGHIKLMNPQRSTVWY" const _DEFAULT_NT_ALPHABET_STRING = "-ACGT" const _DEFAULT_NT_ALPHABET_STRING_NOGAP = "ACGT" const _DEFAULT_BINARY_ALPHABET_STRING = "01" const _DEFAULT_ALPHABET_STRING = _DEFAULT_AA_ALPHABET_STRING const aa_alphabet = Alphabet(_DEFAULT_AA_ALPHABET_STRING) const aa_alphabet_names = (:aa, :AA, :aminoacids, :amino_acids) const nt_alphabet = Alphabet(_DEFAULT_NT_ALPHABET_STRING) const nt_alphabet_names = (:nt, :nucleotide, :dna) const binary_alphabet = Alphabet(_DEFAULT_BINARY_ALPHABET_STRING) const binary_alphabet_names = (:binary, :spin) function Alphabet(name::Symbol, ::Type{T}=Int) where T <: Integer return if name in aa_alphabet_names # Alphabet(_DEFAULT_AA_ALPHABET_STRING, T; default_char = '-') convert(Alphabet{Char, T}, aa_alphabet) elseif name in nt_alphabet_names # Alphabet(_DEFAULT_NT_ALPHABET_STRING, T; default_char = '-') convert(Alphabet{Char, T}, nt_alphabet) elseif name in binary_alphabet_names # Alphabet(_DEFAULT_BINARY_ALPHABET_STRING, T) convert(Alphabet{Char, T}, binary_alphabet) else names = convert( Vector{Any}, vcat(aa_alphabet_names, nt_alphabet_names, binary_alphabet_names) ) error("Unrecognized alphabet name $name - Possible names $names") end end """ default_alphabet(q::Int, T::Type) - if `q==21`, amino acids - if `q==5`, nucleotides - if `q==4`, nucleotides without gaps - if `q==2`, binary (0, 1) - else, if `q<21`, return the restriction of amino acids to the first q sites - if `q>21`, fails """ function default_alphabet(q::Integer, ::Type{T}=Int) where T <: Integer @assert q > 0 "`q` must be strictly positive - got $q" return if q == 2 Alphabet(:binary, T) elseif q == 4 Alphabet(_DEFAULT_NT_ALPHABET_STRING_NOGAP, T) elseif q == 5 Alphabet(:dna, T) elseif 5 < q <= 21 Alphabet(:aa, T) else error("No defined default alphabet for q = $q (<2 or > 21) - provide your own or use `nothing`") end end ################################################ ############ Transforming sequences ############ ################################################ ## Symbol to Int function (alphabet::Alphabet{A,T})(c::A) where {A,T} i = get(alphabet.char_to_index, c, alphabet.default_index) if isnothing(i) error("Symbol $c not in alphabet, and no defaults set.") end return i end function (alphabet::Alphabet{A,T})(S::AbstractVector{<:A}) where {A,T} return map(x -> alphabet(x), S) end function (alphabet::Alphabet{Char,T})(S::AbstractString) where T return map(x -> alphabet(x), collect(S)) end ## Int to Symbol function (alphabet::Alphabet)(x::Integer) c = get(alphabet.index_to_char, x, alphabet.default_char) if isnothing(c) error("$x is not in alphabet range, and no 
defaults set.") end return c end # If it makes sense, convert a `Vector{Int}` to a string function (alphabet::Alphabet{<:AbstractChar,T} where T<:Integer)(X::AbstractVector{<:Integer}) return prod(map(alphabet, X)) end # Otherwise return an array of symbols (alphabet::Alphabet)(X::AbstractVector{<:Integer}) = map(alphabet, X) (alphabet::Alphabet)(::Missing) = missing # Equivalent to alphabet(X) for Char based alphabets # Otherwise, can be overloaded for the desired effect # Used when writing fasta function to_string( X::AbstractVector{<:Integer}, alphabet::Alphabet{<:AbstractChar,<:Integer}, ) return alphabet(X) end """ translate(x, original_alphabet::Alphabet, new_alphabet::Alphabet) Return the translation in `new_alphabet` of an integer or a vector of integers `x` that is expressed in `original_alphabet`. """ function translate(x::Integer, original_alphabet::Alphabet, new_alphabet::Alphabet) return x |> original_alphabet |> new_alphabet end function translate(X::AbstractVector{<:Integer}, A::Alphabet, B::Alphabet) return map(x -> translate(x, A, B), X) end ################################################ ##################### Misc ##################### ################################################ Base.length(alphabet::Alphabet) = length(alphabet.characters) Base.in(i::Integer, alphabet::Alphabet) = haskey(alphabet.index_to_char, i) Base.in(c::A, alphabet::Alphabet{A,I}) where {A,I} = haskey(alphabet.char_to_index, c) function name(alphabet::Alphabet{Char,I}) where I return if prod(alphabet.characters) == _DEFAULT_AA_ALPHABET_STRING :aa elseif prod(alphabet.characters) == _DEFAULT_NT_ALPHABET_STRING :dna elseif prod(alphabet.characters) == _DEFAULT_BINARY_ALPHABET_STRING :binary else :custom end end function name(alphabet::Alphabet{A,I}) where {A,I} return :custom end Base.getindex(alphabet::Alphabet, i::Integer) = alphabet.index_to_char[i] Base.getindex(alphabet::Alphabet{A,I}, c::A) where {A,I} = alphabet.char_to_index[c] function Base.show(io::IO, alphabet::Alphabet{A,I}) where {A,I} print(io, "Alphabet{$A,$I}: $(alphabet.characters)") end function Base.show(io::IO, x::MIME"text/plain", alphabet::Alphabet{A,I}) where {A,I} println(io, "$(name(alphabet)) Alphabet{$A,$I} with mapping $(alphabet.characters)") end """ symbols(alphabet) Return the vector of symbols/characters used by `alphabet`. """ symbols(alphabet::Alphabet) = alphabet.characters
BioSequenceMappings
https://github.com/PierreBarrat/BioSequenceMappings.jl.git
[ "MIT" ]
0.1.3
e1032040d1be0b6a9bcfba77379c6384e301b24b
code
2296
""" hamming(x, y; normalize=true, positions=nothing) Hamming distance between `Vectors` `x` and `y`. Only sites in vector `positions` will be considered. """ function hamming( X::AbstractVector{<:Integer}, Y::AbstractVector{<:Integer}; normalize=true, positions = nothing, ) @assert length(X) == length(Y) """Expect vectors of same length. Instead $(length(X)) != $(length(Y))""" H = if isnothing(positions) sum(zip(X, Y)) do (x,y) x != y end else sum(positions) do i X[i] != Y[i] end end Z = (isnothing(positions) ? length(X) : length(positions)) return normalize ? H / Z : H end function hamming(X::Alignment, i::Integer, Y::Alignment, j::Integer) @warn "Not implemented yet" return nothing end """ pairwise_hamming(X, Y; step=1, step_left, step_right, as_vec=true, kwargs...) pairwise_hamming(X; kwargs...) Return all hamming distances between sequences of `X` and `Y`. In the second form, consider pairs of sequences in `X`. Only consider sequences every `step`. `step_left` and `step_right` can be used to skip sequence either in `X` or in `Y`. This is useful for large alignment, as the number of computations grows with the product of the size of the alignments By default, the return value is a vector organized like `[H(1,2), H(1,3), ..., H(M-1, M)]` with `H` standing for hamming distance and `M` for the number of sequences. If a matrix is prefered, use `as_vec=false` Extra keyword arguments are passed to `hamming`. """ function pairwise_hamming( X::AbstractAlignment, Y::AbstractAlignment; step=1, step_left=step, step_right=step, as_vec=true, kwargs... ) if Alphabet(X) != Alphabet(Y) @warn """Alignments do not have the same alphabet. Are you sure? Left aln: $(Alphabet(X)) Right aln: $(Alphabet(Y)) """ end X_sequences = eachsequence(X; skip = step_left) Y_sequences = eachsequence(Y; skip = step_right) D = [hamming(x, y; kwargs...) for x in X_sequences, y in Y_sequences] return if as_vec M = size(D, 1) [D[i,j] for i in 1:M for j in (i+1):M] else D end end pairwise_hamming(X::AbstractAlignment; kwargs...) = pairwise_hamming(X, X; kwargs...)
BioSequenceMappings
https://github.com/PierreBarrat/BioSequenceMappings.jl.git
[ "MIT" ]
0.1.3
e1032040d1be0b6a9bcfba77379c6384e301b24b
code
3281
function f1(A::AbstractAlignment, w::AbstractVector{Float64}) @assert sum(w) ≈ 1 "Weights must sum to 1 - Instead $(sum(w))" L, M = size(A) q = length(Alphabet(A)) X = A.data' # M x L - for faster iteration f = zeros(Float64, q, L) for i in 1:L, m in 1:M f[X[m,i], i] += w[m] end return f end function f1(X::AbstractAlignment) M = size(X, 2) return f1(X, ones(Float64, M)/M) end f1(X::AbstractAlignment, ::Nothing) = f1(X) # f1(X::AbstractAlignment, weights::AbstractVector) = f1(X, weights) # pretty sure this does nothing? # if weights comes directly from `compute_weights` function f1(X::AbstractAlignment, weights::Tuple{Any,Any}) return f1(X, weights[1]) end """ site_specific_frequencies(X::AbstractAlignment[, weights=X.weights]; as_vec=false) Return the site specific frequencies of `X`. If `as_vec`, the result is a vector of length `Lxq`. Otherwise, it is a matrix of `q` rows and `L` columns (default). """ function site_specific_frequencies(X, w=X.weights; as_vec=false) f = f1(X, w) if as_vec q, L = size(f) return reshape(f, q*L) end return f end function consensus(X::AbstractAlignment, w=X.weights) f = site_specific_frequencies(X, w; as_vec=false) cons = map(argmax, eachcol(f)) return Alignment(cons, Alphabet(X); names = ["consensus"]) end function f2(A::AbstractAlignment, w::AbstractVector) @assert sum(w) ≈ 1 "Weights must sum to 1 - Instead $(sum(w))" L, M = size(A) q = length(Alphabet(A)) X = A.data' f = zeros(Float64, q, q, L, L) for i in 1:L, m in 1:M f[X[m,i], X[m,i], i, i] += w[m] for j in (i+1):L f[X[m, i], X[m, j], i, j] += w[m] f[X[m, j], X[m, i], j, i] += w[m] end end return f end f2(X) = f2(X, ones(Float64, size(X,2))/size(X,2)) f2(X, ::Nothing) = f2(X) f2(X, weights::Tuple{Any,Any}) = f2(X, weights[1]) """ pairwise_frequencies(X::AbstractAlignment, w=X.weights; as_mat=false) Return a `q x q x L x L` tensor. The `(a, b, i, j)` element is the fraction of sequences for which we see `a` at position `i` and `b` at position `j`. If `as_mat=true`, will return a `qL x qL` matrix, with `q x q` blocks representing correlations between two specific columns. """ function pairwise_frequencies(X::AbstractAlignment, w=X.weights; as_mat=false) f = f2(X, w) if as_mat q, L = size(f, 1), size(f, 3) f = permutedims(f, [1, 3, 2, 4]) return reshape(f, q*L, q*L) end return f end """ pairwise_correlations(X, w=X.weights; as_mat=false) Compute connected correlations: the difference between the pairwise frequencies and the product of the single site frequencies. See `?pairwise_frequencies` for the shape of the output. """ function pairwise_correlations(X::AbstractAlignment, w=X.weights; as_mat=false) f1 = site_specific_frequencies(X, w) f2 = pairwise_frequencies(X, w) q, L = size(f1) C = zeros(Float64, q, q, L, L) for i in 1:L, j in (i+1):L, a in 1:q, b in 1:q C[a, b, i, j] = f2[a, b, i, j] - f1[a, i]*f1[b, j] C[b, a, j, i] = C[a, b, i, j] end if as_mat C = permutedims(C, [1, 3, 2, 4]) return reshape(C, q*L, q*L) end return C end
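A brief sketch of how these statistics are typically called, on a tiny hand-made alignment (illustrative; the `:dna` default alphabet has q = 5 states):

```julia
using BioSequenceMappings

data = [1 2 3; 2 3 4]                      # 2 positions x 3 sequences (columns are sequences)
A = Alignment(data; alphabet=:dna)

f = site_specific_frequencies(A)           # 5 x 2 matrix; each column sums to 1
p = pairwise_frequencies(A)                # 5 x 5 x 2 x 2 tensor of joint frequencies
C = pairwise_correlations(A; as_mat=true)  # (q*L) x (q*L) connected-correlation matrix
```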
BioSequenceMappings
https://github.com/PierreBarrat/BioSequenceMappings.jl.git
[ "MIT" ]
0.1.3
e1032040d1be0b6a9bcfba77379c6384e301b24b
code
1635
""" compute_weights(X::AbstractAlignment, θ = 0.2; normalize = true) Compute phylogenetic correction weights for sequences of `X`. The weight sequence `S` is `1/N`, where `N` is the number of sequences in `X` at hamming distance less than `H` from `S` (including `S` itself). The threshold `H` is `floor(θ⋅L)` where `L` is the sequence length. The return value is a tuple `(weights, Meff)`, where `Meff` is the sum of weights (pre-normalization). If `normalize`, weights are normalized to sum to one. . """ function compute_weights(X::AbstractAlignment, θ = 0.2; normalize = true) return if θ == 0. M = sequence_number(X) w = ones(M) normalize ? (w/sum(w), M) : (w, M) else compute_weights(X.data, θ; normalize) end end """ compute_weights!(X, θ; kwargs...) Compute and set weights for `X`. See `compute_weights`. """ function compute_weights!(X, θ = 0.2; kwargs...) w, Meff = compute_weights(X, θ; kwargs...) X.weights = w return w, Meff end function compute_weights( Y::AbstractMatrix{<:Integer}, theta::Float64; normalize=true, ) L, M = size(Y) threshold = Int(floor(L * theta)) weights = ones(Int, M); d = 0 @inbounds for m in 1:M, l in (m+1):M d = 0 # distance i = 1 # index in sequence while d < threshold && i <= L (Y[i,m] != Y[i,l]) && (d += 1) i += 1 end if d < threshold weights[m]+=1 weights[l]+=1 end end weights = 1 ./ weights Meff = sum(weights) return (normalize ? weights / Meff : weights, Meff) end
BioSequenceMappings
https://github.com/PierreBarrat/BioSequenceMappings.jl.git
[ "MIT" ]
0.1.3
e1032040d1be0b6a9bcfba77379c6384e301b24b
code
511
using BioSequenceMappings using DCAUtils # to compare weights using Random using Test here = dirname(@__FILE__) @testset "BioSequenceMappings.jl" begin @testset "Alphabet" begin println("# Alphabet") include(joinpath(here, "./alphabets/test.jl")) end @testset "Alignment" begin println("# Alignment") include(joinpath(here, "./alignments/test.jl")) end @testset "IO" begin println("# IO") include(joinpath(here, "./io/test.jl")) end end
BioSequenceMappings
https://github.com/PierreBarrat/BioSequenceMappings.jl.git
[ "MIT" ]
0.1.3
e1032040d1be0b6a9bcfba77379c6384e301b24b
code
4503
@testset "Constructors without alphabet" begin @testset "Basic" begin data = Matrix{Int}([1 2 3; 2 3 4]) alignment = Alignment(data; verbose=false) @test typeof(alignment) == Alignment{Char, Int} # should autofind nt alphabet @test size(alignment.data) == (2, 3) @test length(alignment.weights) == 3 @test length(alignment.names) == 3 @test Alphabet(alignment) == BioSequenceMappings.default_alphabet(4, Int) end @testset "With type" begin data = Matrix{Int8}([1 2 3; 2 3 4]) alignment = Alignment(data; verbose=false) @test typeof(alignment) == Alignment{Char,Int8} @test Alphabet(alignment) == BioSequenceMappings.default_alphabet(4, Int8) end @testset "Without alphabet" begin data = Matrix{Int8}([1 2 3; 2 3 4]) alignment = Alignment(data; alphabet = nothing) @test typeof(alignment) == Alignment{Nothing, Int8} @test isnothing(alignment.alphabet) end end @testset "Constructors with explicit alphabet" begin data = Matrix{Int16}([1 2 3; 2 3 4]) @testset "Basic" begin alphabet = Alphabet("ACGT", Int16) alignment = Alignment(data, alphabet) @test typeof(alignment) == Alignment{Char, Int16} @test Alphabet(alignment) == alphabet end @testset "Mismatched types - Alphabet gets priority" begin alphabet = Alphabet("ACGT", Int8) alignment = Alignment(data, alphabet) @test typeof(alignment) == Alignment{Char, Int8} @test Alphabet(alignment) == alphabet end @testset "From alphabet constructor" begin alignment = Alignment(data, "BCDE") @test typeof(alignment) == Alignment{Char, Int16} @test Alphabet(alignment) == Alphabet("BCDE", Int16) end @testset "With keywords" begin weights = [1/4, 1/4, 1/2] names = ["A", "B", "CD"] alignment = Alignment(data, :dna; weights, names) @test Alphabet(alignment) == Alphabet(:dna, Int16) @test alignment.weights == weights @test alignment.names == names end @testset "From vector of vectors" begin vecdat = Matrix{Int}([1 2 3; 2 3 4]) |> eachcol |> collect alignment = Alignment(vecdat, :dna) @test typeof(alignment) == Alignment{Char, Int} @test size(alignment) == (2, 3) # three sequences of length 2 @test Alphabet(alignment) == Alphabet(:dna, Int) end @testset "From vector of integers" begin sequence = Int16[1,2,3,4,5] alignment = Alignment(sequence, :dna) @test typeof(alignment) == Alignment{Char, Int16} @test size(alignment) == (5, 1) @test Alphabet(alignment) == Alphabet(:dna, Int16) end end @testset "Constructor with keyword alphabet" begin data = Matrix{Int}([1 2 3; 2 3 4]) @testset "Explicit alphabet" begin alphabet = Alphabet("ACGT", Int) alignment = Alignment(data; alphabet) @test typeof(alignment) == Alignment{Char, Int} @test Alphabet(alignment) == alphabet end @testset "Symbol" begin alignment = Alignment(data; alphabet=:dna) @test Alphabet(alignment) == Alphabet(:dna) end @testset "Vector of vector data" begin data = [[1,2], [3,4]] alignment = Alignment(data; alphabet = :dna) @test size(alignment) == (2,2) @test alignment[1] == [1,2] @test Alphabet(alignment) == Alphabet(:dna) end @testset "Vector of integers data" begin data = [3,4] alignment = Alignment(data; alphabet = :dna) @test size(alignment) == (2,1) @test alignment[1] == [3,4] @test Alphabet(alignment) == Alphabet(:dna) end end @testset "Test for errors" begin data = Matrix{Int}([1 2 3; 2 3 21]) @test Alignment(data; verbose=false).alphabet == default_alphabet(21) @test_throws AssertionError Alignment(data, :dna) @test_throws AssertionError Alignment(data; weights = [1/2, 1/4, 1/3], verbose=false) @test_throws AssertionError Alignment(data; weights = [1., 1., -1.], verbose=false) @test_throws AssertionError 
Alignment(data; weights = [1/2, 1/4, 1/4, 0.], verbose=false) @test_throws AssertionError Alignment(data; weights = [1/2, 1/4, 1/3], names = ["A"], verbose=false) data = Matrix{Int}([1 2 3; 2 3 22]) @test_throws ErrorException Alignment(data; verbose=false) @test Alignment(data; alphabet=:none).alphabet == nothing end
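For quick reference, a hedged sketch of the constructor forms exercised by the tests above (mirroring the test data, not exhaustive):

```julia
using BioSequenceMappings

data = [1 2 3; 2 3 4]                    # 2 x 3 matrix: three sequences of length two

Alignment(data; verbose=false)           # alphabet auto-detected from the range of values
Alignment(data, Alphabet("ACGT", Int))   # explicit Alphabet
Alignment(data, :dna)                    # named default alphabet
Alignment(data; alphabet=nothing)        # no alphabet attached
Alignment([[1, 2], [3, 4]], :dna)        # from a vector of integer sequences
```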
BioSequenceMappings
https://github.com/PierreBarrat/BioSequenceMappings.jl.git
[ "MIT" ]
0.1.3
e1032040d1be0b6a9bcfba77379c6384e301b24b
code
1360
@testset "Iterating" begin data = Matrix{Int16}([1 2 3; 2 3 4]) X = Alignment(data; verbose=false) @test size(X) == (2, 3) @test length(X) == 3 @test sum(X) == Int16[6, 9] # sum of sequences, meaningless but it's for test @test map(sum, X) == Int16[3, 5, 7] # sum of each sequence, also meaningless @test findall(x -> x==[1,2], X) == [1] @test first(Iterators.reverse(X)) == [3, 4] end @testset "Indexing" begin data = Matrix{Int16}([1 2 3; 2 3 4]) X = Alignment(data; verbose=false) @test X[1] == Int16[1, 2] @test X[1:2] == X.data[:, 1:2] @test X[:] == X.data @test X[end] == Int16[3, 4] end @testset "Subsample" begin data = Matrix{Int16}([1 2 3; 2 3 4]) X = Alignment(data; verbose=false) Y = subsample(X, 1) @test typeof(Y) == typeof(X) @test size(Y) ==(2, 1) @test Y.alphabet == X.alphabet @test Y[1] == X[1] @test Y[1] !== X[1] # different in memory Y = subsample(X, 1:2) @test typeof(Y) == typeof(X) @test size(Y) == (2, 2) @test Y.alphabet == X.alphabet @test Y[1] == X[1] && Y[2] == X[2] @test Y[1] !== X[1] && Y[2] !== X[2] Y = subsample(X, 3:-1:2) @test typeof(Y) == typeof(X) @test size(Y) == (2, 2) @test Y[1] == X[3] && Y[2] == X[2] Y = subsample(X, :) @test all(i -> X[i] == Y[i], eachindex(Y)) end
BioSequenceMappings
https://github.com/PierreBarrat/BioSequenceMappings.jl.git
[ "MIT" ]
0.1.3
e1032040d1be0b6a9bcfba77379c6384e301b24b
code
613
@testset "weights" begin Random.seed!(42) Y = Int8.(rand(1:2, 10, 100)) X = Alignment(Y; verbose=false) for θ in 0:.2:1 (w1, M1) = BioSequenceMappings.compute_weights(Y, θ; normalize=false) (w2, M2) = DCAUtils.compute_weights(Y, θ; verbose=false) @test w1 ≈ w2 @test M1 ≈ M2 @test sum(BioSequenceMappings.compute_weights(Y, .4; normalize=true)[1]) ≈ 1 @test BioSequenceMappings.compute_weights(X, θ; normalize=false)[1] ≈ w1 end end @testset "find sequence" begin @warn "I should write a test for `find_sequence` and `match_sequences`" end
BioSequenceMappings
https://github.com/PierreBarrat/BioSequenceMappings.jl.git
[ "MIT" ]
0.1.3
e1032040d1be0b6a9bcfba77379c6384e301b24b
code
258
basedir = dirname(@__FILE__) @testset "Basics" begin include(joinpath(basedir, "basics.jl")) end @testset "Interfaces" begin include(joinpath(basedir, "interfaces.jl")) end @testset "Methods" begin include(joinpath(basedir, "methods.jl")) end
BioSequenceMappings
https://github.com/PierreBarrat/BioSequenceMappings.jl.git
[ "MIT" ]
0.1.3
e1032040d1be0b6a9bcfba77379c6384e301b24b
code
7762
# Very basic tests @testset "Alphabet constructor tests" begin # Test valid input @testset "Constructor String + Dict" begin alphabet = Alphabet("ACGT-", Dict{Char, Int}('A'=>1, 'C'=>2, 'G'=>3, 'T'=>4, '-'=>5)) @test alphabet.characters == collect("ACGT-") @test alphabet.char_to_index == Dict{Char, Int}('A'=>1, 'C'=>2, 'G'=>3, 'T'=>4, '-'=>5) @test alphabet.index_to_char == Dict{Int, Char}(1=>'A', 2=>'C', 3=>'G', 4=>'T', 5=>'-') @test isa(alphabet, Alphabet{Char,Int}) end @testset "Constructor Vec + Dict - custom struct for characters" begin struct T c::Char end alphabet = Alphabet([T('A'), T('C')], Dict{T, Int}(T('A')=>1, T('C')=>2)) @test alphabet.characters == [T('A'),T('C')] @test alphabet.char_to_index == Dict{T, Int}(T('A')=>1, T('C')=>2) @test alphabet.index_to_char == Dict{Int, T}(1=>T('A'), 2=>T('C')) @test isa(alphabet, Alphabet{T,Int}) end # Test constructor with Dict input @testset "Constructor Dict" begin alphabet = Alphabet(Dict('A'=>1, 'C'=>2, 'G'=>3, 'T'=>4)) @test alphabet.characters == collect("ACGT") @test alphabet.char_to_index == Dict('A'=>1, 'C'=>2, 'G'=>3, 'T'=>4) end # Test constructor with different lengths for string and mapping @test_throws AssertionError Alphabet("-ACGT", Dict('A'=>1, 'C'=>2, 'G'=>3)) # Test constructor with incomplete mapping @test_throws AssertionError Alphabet("-ACGT", Dict('A'=>1, 'C'=>2, 'G'=>3, 'T'=>4, 'X'=>5)) # Test constructor with empty string @testset "Empty string" begin alphabet = Alphabet("") isempty(alphabet.characters) && alphabet.char_to_index == Dict() end # Test constructor from string alone @testset "String constructor" begin alphabet = Alphabet("AB") @test typeof(alphabet) == Alphabet{Char, Int} @test alphabet.characters == collect("AB") @test alphabet.char_to_index == Dict{Char, Int}('A' => 1, 'B' => 2) end # Test constructor from string alone -- not integer types @testset "String constructor - Int8" begin alphabet = Alphabet("AB", Int8) @test typeof(alphabet) == Alphabet{Char,Int8} @test alphabet.characters == collect("AB") @test alphabet.char_to_index == Dict{Char, Int8}('A' => 1, 'B' => 2) end # the test below wants to test overflow of Int8 for strings with diverse characters @testset "Unicode string + Int8" begin long_unicode_string = "≔⪉➰Ⅰ⚃⻘⧓⌏☷∉⻔⛴⿐ⱞ➒℃➷⮡⦊⻌⁆ⷢⅽ➫ⰹⓑⲂ⎛➻⮝ⷕ⮥⇑∇ⳣ⍹\u20c2⾣Ⲥⵀ➶⒰⨼⾧Ⓥⷵ〉⣀♈⽺⺆⦤⟹⁍⯕\u2fe4ⳘⱣ⡭⯐⪘℅⺈ⷽ⠴⇽⼝⿂⋅ⵥ␠⬯Ɀ⢒⢏⽹Ⓝⲵ⤢ⶕ⌲ↆ⑂⤻⚲\u2fe8▰⹋⦮⬄≢⣔⫿⅑Ⲏ❠\u2065▢⊺ⵡℙ⠰ⴈⱯ⎽☊ₜ⢳⧷⽌⯸⎾⡀⡚⍁⾱⨷⺊ℯ⼨⸱⸃⠷⛠⾢⤮ⱀ⒡⟽ⷃ⒋⑲⤆⑯⬊ⲁ☫\u2d29⬬⥄╟ⶮ ⢋♼⧂⧽⾰⫖⧼⡕⤦⎂Ⓨ◎⽩➛▍⮄␟♛ⵉ⺭▔╯ℂ✉⌃⤅K⽙⸐Ⓜ⣩⦘Ⱂℇ∞⽐ⴏ⮻☠┓⪡⾶ⓢ⻀≲┬⣅⦓⧈⤬≱⚀⫠⑾⅌" @test_throws InexactError Alphabet(long_unicode_string, Int8) alphabet = Alphabet(long_unicode_string, Int16) @test typeof(alphabet) == Alphabet{Char, Int16} end # Test throw for abstract type @test_throws AssertionError Alphabet("ABC", Signed) # Test throw for non unique alphabet string @test_throws AssertionError Alphabet("AAA") # Test constructor with non-Char keys in mapping @test_throws MethodError Alphabet("-ACGT", Dict('A'=>1, 'C'=>2, 'G'=>3, 4=>4)) end @testset "Equality and hash" begin A1 = Alphabet(:dna, Int16) A2 = Alphabet(:dna, Int16) # same data and same type B = Alphabet(:dna, Int8) # same data and different type C = Alphabet(:aa, Int16) # different data and same type @test A1 == A2 @test hash(A1) == hash(A2) @test A1 != B && hash(A1) != hash(B) end @testset "Copy, convert" begin # Testing the `copy` function @testset "copy" begin original_alphabet = Alphabet("ACGT") copied_alphabet = copy(original_alphabet) # Check that the copied alphabet is equal to the original (using data) @test copied_alphabet == original_alphabet # Check that the copied alphabet 
is not the same object as the original (in memory) @test copied_alphabet !== original_alphabet end # Testing the `convert` function @testset "convert function" begin original_alphabet = Alphabet("ACGT") # Convert to the same type should return the same object with === (Base.convert) @test convert(Alphabet{Char, Int}, original_alphabet) === original_alphabet @test convert(Int, original_alphabet) === original_alphabet # Convert to a different type should return a new object with the same content converted_alphabet = convert(Alphabet{Char,UInt8}, original_alphabet) @test converted_alphabet.characters == original_alphabet.characters @test converted_alphabet.char_to_index == original_alphabet.char_to_index @test converted_alphabet.index_to_char == original_alphabet.index_to_char # Check that the converted alphabet is not the same object as the original @test converted_alphabet !== original_alphabet # Check the second formulation of convert @test convert(UInt8, original_alphabet) == converted_alphabet end end @testset "Defaults" begin local BSM = BioSequenceMappings @test Alphabet(:dna).characters == collect(BSM._DEFAULT_NT_ALPHABET_STRING) @test Alphabet(:nt).characters == collect(BSM._DEFAULT_NT_ALPHABET_STRING) @test Alphabet(:nucleotide).characters == collect(BSM._DEFAULT_NT_ALPHABET_STRING) @test Alphabet(:aa).characters == collect(BSM._DEFAULT_AA_ALPHABET_STRING) @test Alphabet(:amino_acids).characters == collect(BSM._DEFAULT_AA_ALPHABET_STRING) @test Alphabet(:aminoacids).characters == collect(BSM._DEFAULT_AA_ALPHABET_STRING) @test Alphabet(:AA, Int8).characters == collect(BSM._DEFAULT_AA_ALPHABET_STRING) @test Alphabet(:spin).characters == collect(BSM._DEFAULT_BINARY_ALPHABET_STRING) @test Alphabet(:binary).characters == collect(BSM._DEFAULT_BINARY_ALPHABET_STRING) @test typeof(Alphabet(:spin, Int8)) == Alphabet{Char, Int8} @test_throws ErrorException Alphabet(:some_symbol) @test default_alphabet(4).characters == collect(BSM._DEFAULT_NT_ALPHABET_STRING_NOGAP) @test default_alphabet(21).characters == collect(BSM._DEFAULT_AA_ALPHABET_STRING) @test default_alphabet(5).characters == collect(BSM._DEFAULT_NT_ALPHABET_STRING) @test default_alphabet(14).characters == collect(BSM._DEFAULT_AA_ALPHABET_STRING) @test_throws ErrorException default_alphabet(22) @test_throws ErrorException default_alphabet(1) end @testset "Sequence to int" begin @test begin s = "TGCA" alphabet = Alphabet(:dna) X = alphabet(s) X == [5,4,3,2] && typeof(X) == Vector{Int} end @test begin s = "TGCA" alphabet = Alphabet(:dna, Int8) X = alphabet(s) X == [5,4,3,2] && typeof(X) == Vector{Int8} end @test_throws ErrorException begin s = "-TGCAB" alphabet = Alphabet(:dna, Int8) alphabet(s) end @test begin s = "AB12" alphabet = Alphabet("CBA321", UInt8) X = alphabet(s) X == [3, 2, 6, 5] && typeof(X) == Vector{UInt8} end end @testset "Int to sequence" begin @test begin X = Int[1,2,3,4] alphabet = Alphabet(:dna) alphabet(X) == "-ACG" end @test begin X = Int8[5,4,3,2] alphabet = Alphabet(:dna, Int16) # Int16 should not matter here alphabet(X) == "TGCA" end @test_throws ErrorException begin X = [1,2,3,4,5,6] alphabet = Alphabet("-ACGT") alphabet(X) end # Test with UTF8 @test begin X = [1,2,3,4] alphabet = Alphabet("τ4ν2") alphabet(X) == "τ4ν2" end end
BioSequenceMappings
https://github.com/PierreBarrat/BioSequenceMappings.jl.git
[ "MIT" ]
0.1.3
e1032040d1be0b6a9bcfba77379c6384e301b24b
code
1054
basedir = dirname(@__FILE__) @testset "Alphabet from fasta" begin auto_alph = BioSequenceMappings.auto_alphabet_from_fasta # convenience @test auto_alph(joinpath(basedir, "toy_fasta_aa.fasta")) == Alphabet(:aa) @test auto_alph(joinpath(basedir, "toy_fasta_dna.fasta")) == Alphabet(:dna) @test auto_alph(joinpath(basedir, "toy_fasta_bin.fasta")) == Alphabet(:binary) @info "Tests: warning will be produced below" @test !in( auto_alph(joinpath(basedir, "toy_fasta_else.fasta")), [Alphabet(:dna), Alphabet(:aa), Alphabet(:binary)] ) end @testset "Writing and reading" begin # test whether FASTX handles long names long_name = repeat("1", 250) short_name = "Hello" seq = [1,2,3,4] A = Alignment([seq, reverse(seq)], alphabet = :nt, names = [long_name, short_name]) write(joinpath(basedir, "aln.fasta"), A) B = read_fasta(joinpath(basedir, "aln.fasta"), alphabet = :nt) @test find_sequence(long_name, B) == (1, seq) @test find_sequence(short_name, B) == (2, reverse(seq)) end
BioSequenceMappings
https://github.com/PierreBarrat/BioSequenceMappings.jl.git
[ "MIT" ]
0.1.3
e1032040d1be0b6a9bcfba77379c6384e301b24b
docs
1901
# BioSequenceMappings [![Stable](https://img.shields.io/badge/docs-stable-blue.svg)](https://PierreBarrat.github.io/BioSequenceMappings.jl/stable/) [![Dev](https://img.shields.io/badge/docs-dev-blue.svg)](https://PierreBarrat.github.io/BioSequenceMappings.jl/dev/) The aim of the package is to facilitate the task of converting biological sequences (nucleotides, amino acids) to integers or to onehot representation. It also provides simple functions to manipulate alignments or to compute simple statistics. *Note*: I do not frequently use the onehot format, so for now it is not documented / not well tested. I'll develop it more when I get the time. ## Installation From a Julia session: ```julia using Pkg Pkg.add("BioSequenceMappings") using BioSequenceMappings ``` ## Usage Check the [documentation](https://pierrebarrat.github.io/BioSequenceMappings.jl/dev/) for more information. Below are some examples. ### Filter sequences by Hamming distance Load an alignment, find all sequences with a Hamming distance smaller than 66% to the first one, create a new alignment object from them and save it to a file. ```julia using BioSequenceMappings A = read_fasta("example/PF00014.fasta"); size(A) # 100 sequences of length 53 s0 = A[1]; indices = findall(s -> hamming(s, s0; normalize=true) < 0.66, A) B = subsample(A, indices) write("example/PF00014_subsample.fasta", B) ``` ### Remove columns with too many gaps Load an alignment, find columns with more than 5% gaps and remove them, then create a new alignment object. ```julia using BioSequenceMappings A = read_fasta("example/PF00014.fasta"); # uses the default alphabet `aa_alphabet` gap_digit = A.alphabet('-') f1 = site_specific_frequencies(A) non_gapped_columns = findall(j -> f1[gap_digit, j] <= 0.05, 1:size(f1, 2)) B = Alignment(A.data[non_gapped_columns, :], A.alphabet; A.names) # sequences are stored as columns ```
BioSequenceMappings
https://github.com/PierreBarrat/BioSequenceMappings.jl.git
[ "MIT" ]
0.1.3
e1032040d1be0b6a9bcfba77379c6384e301b24b
docs
5479
```@meta DocTestSetup = quote using BioSequenceMappings end ``` # The `Alignment` type An [`Alignment`](@ref) essentially contains a set of aligned biological sequences mapped to integers. It is a subtype of the more general `AbstractAlignment`. The precise type is of the form `Alignment{A,T}` where `A` is the type of biological symbols (see [The `Alphabet` type](@ref)) and `T` the type of integers. Its fields are simple and self-explanatory: - `data::Matrix{T}`: the mapped sequences. Each column of the matrix is one sequence. - `alphabet::Alphabet{A,T}`: the alphabet defining the mapping from symbols to integers. - `weights::Vector{Float64}`: weights associated with each sequence. DCA-like methods assign phylogenetic weights to sequences when inferring models. - `names::Vector{String}`: names of the sequences, corresponding to the description string if the alignment was built from a FASTA file. The dimensions of an alignment can be obtained using `size`: length $\times$ number of sequences. ## Reading & writing For now, the only file format that this package interacts with is FASTA. Reading an alignment is simple: ```@repl align1 using BioSequenceMappings # hide A = read_fasta("../../example/PF00014.fasta") size(A) # length and number of sequences ``` When reading from FASTA, the choice of the alphabet is made by reading the first five sequences, and comparing the observed characters with the list of default alphabets (see [The `Alphabet` type](@ref)). If they fit one of the defaults, it will be used. Otherwise, an alphabet will be created *ad hoc*: ```@repl strange_characters using BioSequenceMappings # hide A = read_fasta("../../example/strange_characters.fasta"); # warning produced because no default alphabet was found A.alphabet |> symbols |> prod ``` Writing to a FASTA file is just as easy: ```@repl strange_characters write("new_fasta_file.fasta", A) # or... open("new_fasta_file.fasta", "w") do io write(io, A) end ``` ## Accessing & iterating Sequences can be accessed by indexing. Indexing using a range will return a view in the underlying `data` matrix. ```@repl align1 A[1] # the first sequence of the alignment size(A[1:5]) # 5 sequences of length L ``` !!! tip When indexing or when iterating, the return value is a *reference* to the sequence and not a copy. For this reason, if you modify the sequence, the alignment itself will be modified: ```@repl using BioSequenceMappings # hide A = read_fasta("../../example/PF00014.fasta") # hide s = A[1]' s[1] = 21 A[1]' # the first element has been modified ``` The fact that calls like `A[1:5]` return a matrix-like object can be inconvenient. To iterate over sequences as vectors of integers, one can use the [`eachsequence`](@ref) function. It takes the alignment as a first argument and optionally indices, and returns an iterator. ```@repl align1 for s in eachsequence(A) # s: vector of integers end map(length, eachsequence(A, 1:5:16)) # length of sequences 1, 6, 11, 16 collect(eachsequence(A; skip=25)) # collect as list of vectors, taking one every 25 sequences ``` If the name of the sequence is needed, the iterator [`named_sequences`](@ref) can be used instead, taking the same arguments and iterating over tuples of the form `(name, sequence)`. ## Finding sequences The package provides [`find_sequence`](@ref) to find sequences by name in an alignment, and [`match_sequences`](@ref) to match all sequences with a particular name pattern. 
```jldoctest align2; setup = :(example_dir = joinpath(dirname(pathof(BioSequenceMappings)), "../example")) julia> A = read_fasta(joinpath(example_dir, "toy_fasta_dna.fasta")); julia> n = A.names[1] # name of the first sequence in the alignment "sequence_1" julia> find_sequence(n, A) # index and sequence (1, [5, 3, 4, 2, 4, 1, 5, 1, 5, 5]) julia> indices, sequences = match_sequences(r"sequence_[0-9]", A); # using a regex julia> indices 5-element Vector{Int64}: 1 2 3 4 5 ``` !!! note `find_sequence` searches for an exact match. It is based on `findfirst`, and will thus return the first match it finds. If nothing is found, then the result is `nothing`. On the other hand, `match_sequences` is based on `occursin` and `findall`. The returned sequences are references to original objects, as when indexing. ## Creating subalignments The call `subsample(A, indices)` will create an `AbstractAlignment` of the same type as `A` by taking only the sequences at `indices`. It uses the same alphabet as `A` and copies over the names of the sequences. Note that [`subsample`](@ref) *copies* the underlying data, creating a completely independent object. ```@repl align3 using BioSequenceMappings # hide A = read_fasta("../../example/toy_fasta_dna.fasta") A[1][1] # first character of the first sequence B = subsample(A, 1:2:5) B[1][1] = 5 A[1][1] # A remains unchanged ``` With [`subsample_random`](@ref), it is also possible to create a random subalignment by picking sequences from the original one. For now, this is only possible without replacement, *i.e.* the same sequence cannot be picked twice. To pick a single sequence at random without creating a new alignment object, just call `rand`. ```@repl align3 subsample_random(A, 3) # new alignment using three random sequences from A subsample_random(A, 12) # sampling without replacement: this will error since size(A, 2) < 12 rand(A) # one random sequence from A (returns a view) ``` ## OneHotAlignment TBA
BioSequenceMappings
https://github.com/PierreBarrat/BioSequenceMappings.jl.git
[ "MIT" ]
0.1.3
e1032040d1be0b6a9bcfba77379c6384e301b24b
docs
5141
```@setup alpha1 using BioSequenceMappings ``` ```@meta DocTestSetup = quote using BioSequenceMappings A = Alphabet(['A', 'C', 'G', 'T', '-']) end ``` # The `Alphabet` type ## Basics An `Alphabet` contains information necessary to map biological symbols to integers and inversely. The full type is `Alphabet{A,I}`, where `A` is the type of biological symbols (typically `Char`) and `I` is a subtype of `Integer`. The simplest way to create an alphabet is from a list of symbols: ```@repl alpha1 A = Alphabet(['A', 'C', 'G', 'T', '-']) ``` The created alphabet `A` associates the `Char`s corresponding to nucleotides to `Int`s according to the index at which they appear in the input vector: `'A' => 1`, `'C' => 2`, etc... Note that we could have created the same alphabet from a string (since it's based on `Char`s) or from a dictionary: ```jldoctest alpha2 julia> B = Alphabet("ACGT-"); julia> C = Alphabet(Dict('A'=>1, 'C'=>2, 'G'=>3, 'T'=>4, '-'=>5)); julia> A == B true julia> A == C true ``` The alphabet is used to map symbols to integers and inversely. This is done by calling the object directly, as a function: ```jldoctest alpha2 julia> A('A') # mapping Char to Int 1 julia> A(1) # mapping Int to Char 'A': ASCII/Unicode U+0041 (category Lu: Letter, uppercase) julia> A("A-GT") # mapping a string to a vector 4-element Vector{Int64}: 1 5 3 4 julia> A([1,2,3]) # mapping a vector to a string "ACG" ``` If needed, the mapping is accessible using the `symbols` function: ```jldoctest alpha2 julia> symbols(A) 5-element Vector{Char}: 'A': ASCII/Unicode U+0041 (category Lu: Letter, uppercase) 'C': ASCII/Unicode U+0043 (category Lu: Letter, uppercase) 'G': ASCII/Unicode U+0047 (category Lu: Letter, uppercase) 'T': ASCII/Unicode U+0054 (category Lu: Letter, uppercase) '-': ASCII/Unicode U+002D (category Pd: Punctuation, dash) julia> A |> symbols |> prod # as a string "ACGT-" ``` ## Default alphabets The package comes with three default alphabets: - an amino-acid alphabet `aa_alphabet` using the mapping `"-ACDEFGHIKLMNPQRSTVWY"`; - a nucleotide alphabet `nt_alphabet` using the mapping `"-ACGT"`; - a "binary" alphabet `BioSequenceMappings.binary_alphabet`, which I found useful for simulations, with the mapping: `"01"`. These can also be accessed by calling `Alphabet(name)` where `name` is a symbol corresponding to any of the default alphabets. The symbolic names can easily be found: ```jldoctest alpha2 julia> BioSequenceMappings.aa_alphabet_names # also works with nt and binary alphabets (:aa, :AA, :aminoacids, :amino_acids) julia> Alphabet(:aa) == aa_alphabet true julia> Alphabet(:amino_acids)([1,2,3]) "-AC" ``` Each default alphabet is also associated to a specific cardinality of biological symbols through the function `default_alphabet`. This means that an integer vector with elements ranging from 1 to `q` will be associated to the following alphabets: ```jldoctest alpha2 julia> default_alphabet(2) == Alphabet(:binary) # q == 2 true julia> default_alphabet(5) == Alphabet(:nt) # q == 5 true julia> default_alphabet(21) == Alphabet(:aa) # 5 < q <= 21 true julia> default_alphabet(15) == Alphabet(:aa) # 5 < q <= 21 true ``` This association is useful to create `Alignment` objects from a matrix of integers without having to specify the alphabet manually. ## Default characters When reading biological sequences, it can be convenient to associate all unexpected characters to a default symbol, for instance the gap. 
This can be achieved by providing the `default_char` keyword argument when constructing the alphabet: ```@repl alpha1 A_default = Alphabet("ACGT-"; default_char = '-') A_default("ABCDEF") # 'unknown' chars are mapped to '-', in turn mapped to 5 A("ABCDEF") # if no defaults are provided, fails ``` This also works the other way around: integers that are not in the range of the alphabet are mapped to the default symbol: ```@repl alpha1 A_default(1:10) # indices larger than 5 are mapped to the gap A(1:10) # if no defaults are provided, fails ``` ## Using specific integer types When created as above, the alphabet will default to using `Int` as the integer type. If dealing with large amounts of data, it can be beneficial to use a more sober type. This is done by providing an extra argument of the desired type when constructing the alphabet: ```@repl alpha1 B = Alphabet("ACGT-", UInt8) B == A B("A-") ``` ## Translating between alphabets It often happens to me that I have an integer vector `X` representing a sequence, but with a mapping different from the one I am used to. The [`translate`](@ref) function lets me convert it to another integer vector with the right mapping. ```jldoctest alpha2 julia> strange_alphabet = Alphabet("TCGA-"); # the default is "-ACGT" julia> X = Int[2, 2, 5, 4, 5]; # representing the sequence "CC-A-" according to the above julia> strange_alphabet(X) "CC-A-" julia> nt_alphabet(X) # this is obviously wrong - nt_alphabet uses "-ACGT" "AATGT" julia> Y = translate(X, strange_alphabet, nt_alphabet) 5-element Vector{Int64}: 3 3 1 2 1 julia> nt_alphabet(Y) # now this works "CC-A-" ```
BioSequenceMappings
https://github.com/PierreBarrat/BioSequenceMappings.jl.git
[ "MIT" ]
0.1.3
e1032040d1be0b6a9bcfba77379c6384e301b24b
docs
1697
```@meta CurrentModule = BioSequenceMappings ``` # BioSequenceMappings The aim of the package is to facilitate the task of converting biological sequences (nucleotides, amino acids) to integers or to onehot representation. It also provides simple functions to manipulate alignments or to compute simple statistics. *Note*: I do not frequently use the onehot format, so for now it is not documented / not well tested. I'll develop it more when I get the time. ## Installation From the Julia REPL: ```@repl using Pkg Pkg.add(url="https://github.com/PierreBarrat/BioSequenceMappings.jl") using BioSequenceMappings ``` ## Usage ### Filter sequences by Hamming distance Load an alignment, find all sequences with a Hamming distance smaller than 66% to the first one, create a new alignment object from them and save it to a file. ```@repl example_1 using BioSequenceMappings A = read_fasta("../../example/PF00014.fasta"); size(A) # 100 sequences of length 53 s0 = A[1]; s0' # transpose for legibility indices = findall(s -> hamming(s, s0; normalize=true) < 0.66, A) B = subsample(A, indices) write("PF00014_subsample.fasta", B) ``` ### Remove columns with too many gaps Load an alignment, find columns with more than 5% gaps and remove them, then create a new alignment object. ```@repl example_2 using BioSequenceMappings # hide A = read_fasta("../../example/PF00014.fasta"); # uses the default alphabet `aa_alphabet` gap_digit = A.alphabet('-') f1 = site_specific_frequencies(A) non_gapped_columns = findall(j -> f1[gap_digit, j] <= 0.05, 1:size(f1, 2))' # transpose for legibility B = Alignment(A.data[non_gapped_columns', :], A.alphabet; A.names) # sequences are stored as columns ```
BioSequenceMappings
https://github.com/PierreBarrat/BioSequenceMappings.jl.git
[ "MIT" ]
0.1.3
e1032040d1be0b6a9bcfba77379c6384e301b24b
docs
227
```@meta CurrentModule = BioSequenceMappings ``` Documentation for [BioSequenceMappings](https://github.com/PierreBarrat/BioSequenceMappings.jl). ```@index ``` ```@autodocs Modules = [BioSequenceMappings] Private = false ```
BioSequenceMappings
https://github.com/PierreBarrat/BioSequenceMappings.jl.git
[ "MIT" ]
0.1.3
e1032040d1be0b6a9bcfba77379c6384e301b24b
docs
1799
# Utilities ## Hamming distance [`hamming(x, y)`](@ref) returns the normalized Hamming distance between two integer vectors. ## Alignment statistics ### Pairwise Hamming distance [`pairwise_hamming(A)`](@ref) returns a vector containing all Hamming distances between pairs of sequences in `A`. For large alignments, it is often practical to consider only a subset of Hamming distances: the `step=n` keyword can be used to only consider every `n`-th sequence. The function is also adapted to two alignments: [`pairwise_hamming(A,B)`](@ref) will consider all pairs of sequences with one member in `A` and the other one in `B`. ### Statistics - the profile of the alignment: [`site_specific_frequencies(A)`](@ref) returns a `q x L` matrix where `q` is the size of the alphabet and `L` the length of sequences, with element `(a, i)` being the fraction of sequences in which character `a` was found at position `i`. - the pairwise frequencies: [`pairwise_frequencies(A)`](@ref) - the pairwise correlations: [`pairwise_correlations(A)`](@ref) All these functions accept a vector of weights as a second argument, and will by default use `A.weights` if it is not provided. ### Weights In DCA-like algorithms, it is customary to weight sequences by their degree of phylogenetic relations. Typically, the weight of a sequence is inversely proportional to the number of other sequences with a Hamming distance smaller than some threshold. For an alignment `X`, computing the weights is done using [`compute_weights`](@ref): ```@repl using BioSequenceMappings # hide A = read_fasta("../../example/toy_fasta_dna.fasta") compute_weights(A) # compute and return the weight vector, as well as the effective number of sequences compute_weights!(A); # same, but sets the weights field in A A.weights ```
BioSequenceMappings
https://github.com/PierreBarrat/BioSequenceMappings.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
5806
using MoYe, CUDA, Test @views function matmul_kernel(A, sA_layout, gmem_copy_A, smem_copy_A, B, sB_layout, gmem_copy_B, smem_copy_B, C, mma_C) sA = MoYeSharedArray(eltype(A), sA_layout) sB = MoYeSharedArray(eltype(B), sB_layout) mA = MoYeArray(A) mB = MoYeArray(B) mC = MoYeArray(C) bM = size(sA_layout, 1) bN = size(sB_layout, 1) bK = size(sB_layout, 2) gA = @tile mA (bM, bK) (blockIdx().x, :) gB = @tile mB (bN, bK) (blockIdx().y, :) gC = @tile mC (bM, bN) (blockIdx().x, blockIdx().y) # gmem copy partition gmem_thr_copy_a = get_slice(gmem_copy_A, threadIdx().x) tAgA = partition_S(gmem_thr_copy_a, gA) # (CPY, CPY_M, CPY_K, k) tAsA = partition_D(gmem_thr_copy_a, sA) # (CPY, CPY_M, CPY_K) gmem_thr_copy_b = get_slice(gmem_copy_B, threadIdx().x) tBgB = partition_S(gmem_thr_copy_b, gB) # (CPY, CPY_N, CPY_K, k) tBsB = partition_D(gmem_thr_copy_b, sB) # (CPY, CPY_N, CPY_K) # mma partition thr_mma = get_slice(mma_C, threadIdx().x) tCsA = partition_A(thr_mma, sA) # (MMA, MMA_M, MMA_K) tCsB = partition_B(thr_mma, sB) # (MMA, MMA_M, MMA_K) tCgC = partition_C(thr_mma, gC) # (MMA, MMA_M, MMA_N) tCrA = make_fragment_A(thr_mma, tCsA) # (MMA, MMA_M, MMA_K) tCrB = make_fragment_B(thr_mma, tCsB) # (MMA, MMA_N, MMA_K) tCrC = make_fragment_C(thr_mma, tCgC) # (MMA, MMA_M, MMA_N) zeros!(tCrC) # retile smem_thr_copy_A = get_slice(smem_copy_A, threadIdx().x) smem_thr_copy_B = get_slice(smem_copy_B, threadIdx().x) tCsA_retiled = partition_S(smem_thr_copy_A, sA) tCsB_retiled = partition_S(smem_thr_copy_B, sB) tCrA_retiled = retile_D(smem_thr_copy_A, tCrA) tCrB_retiled = retile_D(smem_thr_copy_B, tCrB) k_tile_max = size(tAgA, 4) @cuprintln "k_tile_max: $k_tile_max" k_tile = 1 # for k_tile in 1:k_tile_max # copy from global to shared copyto!(gmem_copy_A, tAsA, view(tAgA, :, :, :, k_tile)) copyto!(gmem_copy_B, tBsB, view(tBgB, :, :, :, k_tile)) cp_async_wait() sync_threads() # copy from shared to registers copyto!(smem_copy_A, tCrA_retiled, tCsA_retiled) copyto!(smem_copy_B, tCrB_retiled, tCsB_retiled) # MoYe.copyto_unpack!(MoYe.CopyTraits{LDSM_U32x4_N}(), view(tCrB_retiled, (:,_1), _1, _1), view(tCsB_retiled, (:,_1), _1, _1)) if threadIdx().x == 1 @cuprintln "Thread 1" @cuprintln Int32(sB[17,1]), Int32(sB[17,2]), Int32(sB[17,3]), Int32(sB[17,4]), Int32(sB[17,5]), Int32(sB[17,6]), Int32(sB[17,7]), Int32(sB[17,8]), Int32(sB[17,9]), Int32(sB[17,10]), Int32(sB[17,11]), Int32(sB[17,12]), Int32(sB[17,13]), Int32(sB[17,14]), Int32(sB[17,15]), Int32(sB[17,16]) @cuprintln Int32(sB[18,1]), Int32(sB[18,2]), Int32(sB[18,3]), Int32(sB[18,4]), Int32(sB[18,5]), Int32(sB[18,6]), Int32(sB[18,7]), Int32(sB[18,8]), Int32(sB[18,9]), Int32(sB[18,10]), Int32(sB[18,11]), Int32(sB[18,12]), Int32(sB[18,13]), Int32(sB[18,14]), Int32(sB[18,15]), Int32(sB[18,16]) @cuprintln Int32(tCrB[1,1,1]), Int32(tCrB[2,1,1]), Int32(tCrB[1,2,1]), Int32(tCrB[2,2,1]) @cuprintln Int32(tCrB_retiled[1,1,1]), Int32(tCrB_retiled[2,1,1]), Int32(tCrB_retiled[3,1,1]), Int32(tCrB_retiled[4,1,1]) end if threadIdx().x == 25 @cuprintln "Thread 25" @cuprintln Int32(tCsB_retiled[1]), Int32(tCsB_retiled[2]), Int32(tCsB_retiled[3]), Int32(tCsB_retiled[4]) end @gc_preserve gemm!(mma_C, tCrC, tCrA, tCrB, tCrC) @inbounds tCrC[1] # compiler bug, have to load after copyto! sync_threads() # end copyto!(tCgC, tCrC) @inbounds tCrC[1] # compiler bug, have to load after copyto! 
sync_threads() return nothing end function matmul(A, B, C) bM = _32 bN = _32 bK = _16 sA_atom_layout = @Layout (32, 8) (1, 32) sB_atom_layout = @Layout (8, 16) (16, 1) sA_layout = MoYe.tile_to_shape(sA_atom_layout, (bM, bK)) sB_layout = MoYe.tile_to_shape(sB_atom_layout, (bN, bK)) TA = eltype(A) TB = eltype(B) TC = eltype(C) gmem_copy_A = make_tiled_copy(CopyAtom{CPOP_ASYNC_CACHEALWAYS{UInt128}, TA}(), @Layout((4, 8)), @Layout((4, 1))) gmem_copy_B = make_tiled_copy(CopyAtom{CPOP_ASYNC_CACHEALWAYS{UInt128}, TB}(), @Layout((8, 4), (4, 1)), @Layout((1, 4))) mma = make_tiled_mma(MMAOP_16x8x8_F32TF32TF32F32_TN(), @Layout((1,2,1)), (_32, _32, _8)) # Note: A is M-major so we can only use `UniversalCopy` smem_copy_A = make_tiled_copy_A(CopyAtom{UniversalCopy{TA}, TA}(), mma) smem_copy_B = make_tiled_copy_B(CopyAtom{LDSM_U32x4_N, TB}(), mma) threads = Int(size(mma)) blocks = (cld(size(A, 1), bM), cld(size(B, 1), bN)) @cuda threads=threads blocks=blocks matmul_kernel(A, sA_layout, gmem_copy_A, smem_copy_A, B, sB_layout, gmem_copy_B, smem_copy_B, C, mma) end function test() M = 32 K = 16 N = 32 A = CuArray(reshape(collect(1:M*K) .* 1f0, (M,K))) B = CuArray(reshape(collect(1:N*K) .* 1f0, (K,N))) # K-major C = CuArray(ones(Float32, (M,N))) matmul(A, B', C) CUDA.synchronize() @test C == A * B CUDA.unsafe_free!(A) CUDA.unsafe_free!(B) CUDA.unsafe_free!(C) end test()
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
1880
using MoYe using Documenter DocMeta.setdocmeta!(MoYe, :DocTestSetup, :(using MoYe); recursive=true) makedocs(; modules=[MoYe], authors="MilkshakeForReal <[email protected]> and contributors", repo="https://github.com/YichengDWu/MoYe.jl/blob/{commit}{path}#{line}", warnonly = Documenter.except(:autodocs_block, :cross_references, :docs_block, :doctest, :eval_block, :example_block, :footnote, :linkcheck_remotes, :linkcheck, :meta_block, :parse_error), sitename="MoYe.jl", format=Documenter.HTML(; prettyurls=get(ENV, "CI", "false") == "true", canonical="https://YichengDWu.github.io/MoYe.jl", edit_link="main", assets=String[]), pages=[ "Home" => "index.md", "Manual" => [ "Layout" => "manual/layout.md", "Array" => "manual/array.md", "Broadcasting" => "manual/broadcast.md", "MatMul" => "manual/matmul.md", # "Data Movement" => [ # "Global Memory & Shared Memory" => "manual/datamovement/gs.md", # ] "TiledCopy & TiledMMA" => "manual/tiled_matmul.md", "Memcpy Async" => "manual/async.md", "Pipeline" => "manual/pipeline.md", "Tensor Cores" => "manual/tensor_core.md", ], "API Reference" => [ "Layout" => "api/layout.md", "MoYeArray" => "api/array.md", "Tiling" => "api/tiling.md", "Data Movement" => "api/copy.md", "MMA/Copy Atoms" => "api/atom.md", ], ]) deploydocs(; repo="github.com/YichengDWu/MoYe.jl", push_preview=true, devbranch="main")
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
3230
using MoYe, CUDA, Test using MoYe: @loopinfo function matmul_kernel(A, blocklayout_A, threadlayout_A, B, blocklayout_B, threadlayout_B, C, blocklayout_C, threadlayout_C) sA = MoYeSharedArray(eltype(A), blocklayout_A) sB = MoYeSharedArray(eltype(B), blocklayout_B) X = MoYe.One() M = size(A, 1) N = size(B, 1) K = size(A, 2) mA = MoYeArray(A, (M, K)) mB = MoYeArray(B, (N, K)) mC = MoYeArray(C, (M, N)) bM = size(blocklayout_A, 1) bN = size(blocklayout_B, 1) bK = size(blocklayout_B, 2) blocktile_A = @tile mA (bM, bK) (blockIdx().x, :) # (bM,bK,k) blocktile_B = @tile mB (bN, bK) (blockIdx().y, :) # (bN,bK,k) blocktile_C = @tile mC (bM, bN) (blockIdx().x, blockIdx().y) # (bM,bN) # Load data A and B from gmem to smem a and b threadtile_gA = @parallelize blocktile_A threadlayout_A threadIdx().x # (tM,tK,k) threadtile_sA = @parallelize sA threadlayout_A threadIdx().x # (tM,tK) threadtile_gB = @parallelize blocktile_B threadlayout_B threadIdx().x # (tN,tK,k) threadtile_sB = @parallelize sB threadlayout_B threadIdx().x # (tN,tK) # For mma computation computetile_sA = @parallelize sA threadlayout_C threadIdx().x (X, :) computetile_sB = @parallelize sB threadlayout_C threadIdx().x (:, X) computetile_gC = @parallelize blocktile_C threadlayout_C threadIdx().x frg_c = make_fragment_like(computetile_gC) zeros!(frg_c) k_max = size(threadtile_gA, 3) for i in 1:k_max # copy gmem to smem copyto!(threadtile_sA, view(threadtile_gA, :, :, i)) copyto!(threadtile_sB, view(threadtile_gB, :, :, i)) cp_async_wait() sync_threads() # classic three nested for loops for k in axes(computetile_sA, 2) @loopinfo unroll for m in axes(computetile_sA, 1) @loopinfo unroll for n in axes(computetile_sB, 1) @inbounds frg_c[m, n] += computetile_sA[m, k] * computetile_sB[n, k] end end end sync_threads() end copyto!(computetile_gC, frg_c) return nothing end function matmul(A, B, C) M = size(A, 1) N = size(B, 1) K = size(A, 2) blocklayout_A = @Layout (128, 8) blocklayout_B = @Layout (128, 8) blocklayout_C = @Layout (128, 128) threadlayout_A = @Layout (32, 8) threadlayout_B = @Layout (32, 8) threadlayout_C = @Layout (32, 8) threads = Int(size(threadlayout_C)) bM = size(blocklayout_A, 1) bN = size(blocklayout_B, 1) blocks = (cld(M, bM), cld(N, bN)) @cuda threads=threads blocks=blocks matmul_kernel(A, blocklayout_A, threadlayout_A, B, blocklayout_B, threadlayout_B, C, blocklayout_C, threadlayout_C) end function test() A = CUDA.randn(Float32, 2048, 256) B = CUDA.randn(Float32, 2048, 256) C = CUDA.randn(Float32, 2048, 2048) matmul(A, B, C) @test C == A * B' CUDA.unsafe_free!(A) CUDA.unsafe_free!(B) CUDA.unsafe_free!(C) end
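# Illustrative host-side sketch (not part of the original script). The helper name
# `_partition_shape_sketch` is made up for illustration and is never called. It checks, on
# the CPU, the per-thread tile shape that `@parallelize` produces for one (bM, bK) block with
# the thread layout used above; the expected result, `(4, 1)`, matches the `(tM, tK)` comments
# in `matmul_kernel`.
function _partition_shape_sketch()
    A = ones(Float32, 128, 8)                    # one (bM, bK) = (128, 8) block
    GC.@preserve A begin
        mA = MoYeArray(pointer(A), @Layout((128, 8)))
        thread_layout = @Layout (32, 8)
        tile = @parallelize mA thread_layout 1   # the slice owned by "thread 1"
        return size(tile)                        # expected (4, 1)
    end
end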
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
2945
module MoYe using Static: StaticInt, static, dynamic, is_static, One, Zero, known, repr import Static import ManualMemory, LayoutPointers import StrideArraysCore import StaticArrayInterface using StrideArraysCore: static_length, static_size, static_axes using StrideArraysCore: @gc_preserve using CUDA, BFloat16s, LLVM using CUDA: @device_override using LLVMLoopInfo: @loopinfo using Core: LLVMPtr import Adapt using MacroTools: @capture import LinearAlgebra #import LinearAlgebra include("utilities.jl") include("algorithm/tuple_algorithms.jl") include("int_tuple.jl") include("stride.jl") include("layout.jl") include("engine.jl") include("array.jl") include("broadcast.jl") include("algorithm/array_algorithms.jl") include("pointer.jl") # Arch include("arch/mma/mma.jl") include("arch/mma/make_mma_ops.jl") include("arch/copy/copy.jl") include("arch/copy/copy_async.jl") include("arch/copy/ldmatrix.jl") # Traits include("traits/mma.jl") include("traits/copy.jl") include("traits/cp_async.jl") include("traits/ldmatrix.jl") # Atom include("atom/mma.jl") include("atom/copy.jl") include("atom/ldmatrix.jl") include("algorithm/copy.jl") include("algorithm/blas.jl") # Deprecations include("deprecated.jl") # Device include("device/smem.jl") include("print.jl") # rexport export static, @gc_preserve, static_size, @loopinfo # tuple algorithms export flatten, unflatten export colex_less, elem_less, increment, capacity export coord_to_index, index_to_coord, coord_to_coord, compact_col_major, compact_row_major, GenColMajor, GenRowMajor, @Layout # Layout export Layout, StaticLayout, make_layout, shape, rank, depth, cosize, composition, complement, logical_product, blocked_product, raked_product, zipped_product, tiled_product, logical_divide, zipped_divide, tiled_divide, zeros!, recast, right_inverse, left_inverse, tile_to_shape export print_layout, print_typst # MoYeArray export ArrayEngine, ViewEngine, MoYeArray, make_fragment_like, @parallelize, @tile, zeros! export MoYeSharedArray # Traits export MMATraits, shape_mnk, thr_id, layout_a, layout_b, layout_c, layout_d # Atom export CopyAtom, make_tiled_copy, get_slice, partition_D, partition_S, UniversalFMA, UniversalCopy, CPOP_ASYNC_CACHEALWAYS, CPOP_ASYNC_CACHEGLOBAL export MMAAtom, make_tiled_mma, partition_C, partition_A, partition_B, tile_size, partition_fragment_C, partition_fragment_A, partition_fragment_B, make_tiled_copy_A, make_tiled_copy_B, make_tiled_copy_C, make_fragment_A, make_fragment_B, make_fragment_C, retile_D, retile_S # pointer export isgmem, issmem, isrmem # blas export axpby!, gemm! # data movement export cp_async_wait, cp_async_commit # constants export _0, _1, _2, _3, _4, _5, _6, _8, _9, _10, _16, _32, _64, _128, _256, _512, _1024, _2048, _4096, _8192 end
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
12433
""" MoYeArray(engine::Engine, layout::Layout) MoYeArray{T}(::UndefInitializer, layout::StaticLayout) MoYeArray(ptr, layout::Layout) Create a MoYeArray from an engine and a layout. See also [`ArrayEngine`](@ref) and [`ViewEngine`](@ref). ## Examples ```julia julia> slayout = @Layout (5, 2); julia> array_engine = ArrayEngine{Float32}(undef, cosize(slayout)); # owning array julia> MoYeArray(array_engine, slayout) 5×2 MoYeArray{Float32, 2, ArrayEngine{Float32, 10}, Layout{2, Tuple{Static.StaticInt{5}, Static.StaticInt{2}}, Tuple{Static.StaticInt{1}, Static.StaticInt{5}}}}: -3.24118f12 0.0 7.57f-43 0.0 0.0 0.0 0.0 0.0 7.89217f-40 0.0 julia> MoYeArray{Float32}(undef, slayout) 5×2 MoYeArray{Float32, 2, ArrayEngine{Float32, 10}, Layout{2, Tuple{Static.StaticInt{5}, Static.StaticInt{2}}, Tuple{Static.StaticInt{1}, Static.StaticInt{5}}}}: 4.0f-45 7.57f-43 0.0 0.0 -1.81623f7 0.0 7.57f-43 0.0 -1.81623f7 0.0 julia> A = ones(10); julia> MoYeArray(pointer(A), slayout) # non-owning array 5×2 MoYeArray{Float64, 2, ViewEngine{Float64, Ptr{Float64}}, Layout{2, Tuple{Static.StaticInt{5}, Static.StaticInt{2}}, Tuple{Static.StaticInt{1}, Static.StaticInt{5}}}}: 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 julia> function test_alloc() # when powered by a ArrayEngine, MoYeArray is stack-allocated slayout = @Layout (2, 3) # and mutable x = MoYeArray{Float32}(undef, slayout) fill!(x, 1.0f0) return sum(x) end test_alloc (generic function with 2 methods) julia> @allocated(test_alloc()) 0 ``` """ struct MoYeArray{T, N, E <: Engine{T}, L <: Layout{N}} <: AbstractArray{T, N} engine::E layout::L @inline function MoYeArray(engine::Engine{T}, layout::Layout{N}) where {T, N} return new{T, N, typeof(engine), typeof(layout)}(engine, layout) end end @inline function MoYeArray{T}(::UndefInitializer, l::StaticLayout) where {T} return MoYeArray(ArrayEngine{T}(undef, cosize(l)), l) end @inline function MoYeArray{T}(::UndefInitializer, l::Layout) where {T} throw(ArgumentError("Owning `MoYeArray` cannot be created from a dynamic layout")) end @inline function MoYeArray{T}(::UndefInitializer, shape::Union{StaticInt, StaticIntTuple}, args...) where {T} l = make_layout(shape, args...) return MoYeArray(ArrayEngine{T}(undef, cosize(l)), l) end @inline function MoYeArray{T}(f::Function, l::StaticLayout) where {T} return MoYeArray(ArrayEngine{T}(f, cosize(l)), l) end @inline function MoYeArray{T}(f::Function, shape::Union{StaticInt, StaticIntTuple}, args...) where {T} l = make_layout(shape, args...) return MoYeArray(ArrayEngine{T}(f, cosize(l)), l) end @inline function MoYeArray(ptr::Ptr{T}, layout::Layout) where {T} engine = ViewEngine(ptr) return MoYeArray(engine, layout) end @inline function MoYeArray(ptr::Ptr{T}, shape::GenIntTuple, args...) where {T <: Number} l = make_layout(shape, args...) return MoYeArray(ptr, l) end @inline function MoYeArray(ptr::LLVMPtr{T}, layout::Layout) where {T} engine = ViewEngine(ptr) return MoYeArray(engine, layout) end @inline function MoYeArray(ptr::LLVMPtr{T}, shape::GenIntTuple, args...) 
where {T} return MoYeArray(ptr, make_layout(shape, args...)) end @inline function MoYeArray(x::LinearAlgebra.Transpose) return MoYeArray(pointer(x.parent), make_layout(size(x), GenRowMajor)) end @inline function MoYeArray(x::LinearAlgebra.Adjoint) return MoYeArray(pointer(x.parent), make_layout(size(x), GenRowMajor)) end @inline function MoYeArray(x::AbstractArray) return MoYeArray(pointer(x), make_layout(size(x), GenColMajor)) end #@inline function MoYeArray(x::StaticArrayInterface.StaticArray) # return MoYeArray(pointer(x), make_layout(StaticArrayInterface.static_size(x))) #end @inline MoYeArray(x::AbstractArray, args...) = MoYeArray(pointer(x), args...) @inline MoYeArray(x::LinearAlgebra.Adjoint, args...) = MoYeArray(pointer(x.parent), args...) @inline MoYeArray(x::LinearAlgebra.Transpose, args...) = MoYeArray(pointer(x.parent), args...) const BitMoYeArray{N, E, L} = MoYeArray{Bool, N, E, L} const StaticMoYeArray{T, N, A} = MoYeArray{T, N, A, <:Layout{N, <:StaticIntTuple}} # only size needs to be static const OwningArray{T, N, L} = MoYeArray{T, N, <:ArrayEngine, L} const NonOwningArray{T, N, L} = MoYeArray{T, N, <:ViewEngine, L} const StaticOwningArray{T, N, L} = MoYeArray{T, N, <:ArrayEngine, <:Layout{N, <:StaticIntTuple}} const StaticNonOwningArray{T, N, L} = MoYeArray{T, N, <:ViewEngine, <:Layout{N, <:StaticIntTuple}} const LocalArray{T, N, L} = MoYeArray{T, N, ViewEngine{T, Ptr{T}}, L} const SharedArray{T, N, L} = MoYeArray{T, N, ViewEngine{T, LLVMPtr{T, AS.Shared}}, L} engine(x::MoYeArray) = getfield(x, :engine) layout(x::MoYeArray) = getfield(x, :layout) layout(::Type{<:StaticMoYeArray{T,N,E,L}}) where {T,N,E,L} = L @inline Base.elsize(x::MoYeArray{T}) where {T} = sizeof(T) @inline Base.sizeof(x::MoYeArray) = Base.elsize(x) * length(x) @inline Base.size(x::MoYeArray) = tuple(dynamic(map(capacity, shape(layout(x))))...) @inline Base.size(x::MoYeArray, i::StaticInt) = size(layout(x), i) @inline Base.length(x::MoYeArray) = x |> layout |> shape |> capacity |> dynamic @inline Base.strides(x::MoYeArray) = stride(layout(x)) # note is might be static @inline Base.stride(x::MoYeArray, i::IntType) = getindex(stride(layout(x)), i) @inline rank(x::MoYeArray) = rank(layout(x)) @inline depth(x::MoYeArray) = depth(layout(x)) @inline shape(x::MoYeArray) = shape(layout(x)) @inline shape(x::Type{<:MoYeArray{T, N, E, L}}) where {T, N, E, L} = shape(L) # static interface @inline StaticArrayInterface.static_size(x::StaticMoYeArray) = map(capacity, shape(layout(x))) @inline StaticArrayInterface.static_size(x::A, i::Union{Int, StaticInt}) where {A<:StaticMoYeArray}= size(layout(x), i) @inline function StaticArrayInterface.static_axes(x::StaticMoYeArray{T,N,<:ViewEngine}) where {T,N} return map(Base.oneto, static_size(x)) end @inline StaticArrayInterface.static_axes(x::StaticMoYeArray) = static_axes(MoYeArray(pointer(x), layout(x))) @inline Base.axes(x::StaticMoYeArray) = static_axes(x) @inline Base.axes(x::StaticMoYeArray, i::StaticInt) = static_axes(x, i) @inline function ManualMemory.preserve_buffer(A::MoYeArray) return ManualMemory.preserve_buffer(engine(A)) end @inline function Base.unsafe_convert(::Type{Ptr{T}}, A::MoYeArray{T}) where {T} return Base.unsafe_convert(Ptr{T}, engine(A)) end @inline function Base.pointer(A::MoYeArray) return pointer(engine(A)) end """ pointer(A::MoYeArray, i::Integer) Return a pointer to the element at the logical index `i` in `A`, not the physical index. 
""" @inline function Base.pointer(x::MoYeArray{T}, i::IntType) where {T} offset = coord_to_index0(x.layout, i-one(i)) return pointer(x) + offset*sizeof(T) end Base.IndexStyle(::Type{<:MoYeArray}) = IndexLinear() Base.@propagate_inbounds function Base.getindex(x::OwningArray, ids::Union{Integer, StaticInt, IntTuple}...) @boundscheck checkbounds(x, ids...) # should fail if ids is hierarchical index = layout(x)(ids...) b = ManualMemory.preserve_buffer(x) GC.@preserve b begin @inbounds ViewEngine(pointer(x))[index] end end Base.@propagate_inbounds function Base.getindex(x::NonOwningArray, ids::Union{Integer, StaticInt, IntTuple}...) @boundscheck checkbounds(x, ids...) index = layout(x)(ids...) @inbounds engine(x)[index] end # strictly for fixing print a vector Base.@propagate_inbounds function Base.getindex(x::MoYeArray{T, 1}, row::Int, col::Int) where {T} @inline return getindex(x, row) end Base.@propagate_inbounds function Base.setindex!(x::OwningArray, val, ids::Union{Integer, StaticInt, IntTuple}...) @boundscheck checkbounds(x, ids...) index = layout(x)(ids...) b = ManualMemory.preserve_buffer(x) GC.@preserve b begin @inbounds ViewEngine(pointer(x))[index] = val end end Base.@propagate_inbounds function Base.setindex!(x::NonOwningArray, val, ids::Union{Integer, StaticInt, IntTuple}...) @boundscheck checkbounds(x, ids...) index = layout(x)(ids...) @inbounds engine(x)[index] = val end function Adapt.adapt_structure(to, x::MoYeArray) data = Adapt.adapt_structure(to, engine(x)) return MoYeArray(data, layout(x)) end function Adapt.adapt_storage(::Type{MoYeArray{T, N, A}}, xs::AT) where {T, N, A, AT <: AbstractArray} return Adapt.adapt_storage(A, xs) end @inline StrideArraysCore.maybe_ptr_array(A::MoYeArray) = MoYeArray(ViewEngine(pointer(A)), layout(A)) # Array operations @inline function Base.view(x::MoYeArray{T, N}, coord::Vararg{Colon, N}) where {T, N} b = ManualMemory.preserve_buffer(x) GC.@preserve b begin MoYeArray(pointer(x), layout(x)) end end @inline function Base.view(x::MoYeArray{T, N}, coord::Colon) where {T, N} return view(x, repeat(:, N)...) end @inline function Base.view(x::MoYeArray{T}, coord...) where {T} b = ManualMemory.preserve_buffer(x) GC.@preserve b begin sliced_layout, offset = slice_and_offset(layout(x), coord) MoYeArray(pointer(x) + offset * sizeof(T), sliced_layout) end end @inline function Base.similar(::Type{T}, x::MoYeArray{S,N,E,L}) where {T,S,N,E,Shape<:GenStaticIntTuple,L<:Layout{N, Shape}} return MoYeArray{T}(undef, make_layout_like(x.layout)) end @inline function Base.similar(x::MoYeArray{S,N,E,L}) where {S,N,E,Shape<:GenStaticIntTuple,L<:Layout{N, Shape}} return similar(S, x) end function transpose(x::MoYeArray) return MoYeArray(pointer(x), transpose(x.layout)) end """ append_dim(x::MoYeArray, N::StaticInt) -> MoYeArray Add dimension to the end of the array to N. """ function append_dim(x::MoYeArray, N::StaticInt) return MoYeArray(pointer(x), append(x.layout, N)) end """ append_dim(x::MoYeArray, N::StaticInt) -> MoYeArray Add dimension to the front of the array to N. """ function prepend_dim(x::MoYeArray, N::StaticInt) return MoYeArray(pointer(x), prepend(x.layout, N)) end """ recast(::Type{NewType}, x::MoYeArray{OldType}) -> MoYeArray{NewType} Recast the element type of a MoYeArray. This is similar to `Base.reinterpret`, but dose all the computation at compile time, if possible. 
## Examples ```julia julia> x = MoYeArray{Int32}(undef, @Layout((2,3))) 2×3 MoYeArray{Int32, 2, ArrayEngine{Int32, 6}, Layout{2, Tuple{Static.StaticInt{2}, Static.StaticInt{3}}, Tuple{Static.StaticInt{1}, Static.StaticInt{2}}}}: -1948408944 0 2 514 -268435456 0 julia> x2 = recast(Int16, x) 4×3 MoYeArray{Int16, 2, ViewEngine{Int16, Ptr{Int16}}, Layout{2, Tuple{Static.StaticInt{4}, Static.StaticInt{3}}, Tuple{Static.StaticInt{1}, Static.StaticInt{4}}}}: -23664 0 2 -29731 0 0 514 0 0 0 -4096 0 julia> x3 = recast(Int64, x) 1×3 MoYeArray{Int64, 2, ViewEngine{Int64, Ptr{Int64}}, Layout{2, Tuple{Static.StaticInt{1}, Static.StaticInt{3}}, Tuple{Static.StaticInt{1}, Static.StaticInt{1}}}}: 2209959748496 -1152921504606846976 2 ``` """ @inline function recast(::Type{NewType}, x::MoYeArray{OldType}) where {NewType, OldType} @gc_preserve _recast(NewType, x) end @inline function recast(::Type{OldType}, x::MoYeArray{OldType}) where {OldType} return x end function _recast(::Type{NewType}, x::MoYeArray{OldType}) where {NewType, OldType} @inline old_layout = layout(x) new_layout = recast(old_layout, NewType, OldType) if sizeof(OldType) < sizeof(NewType) # TODO: handle composed layout shape_diff = map(-, flatten(shape(old_layout)), flatten(shape(new_layout))) extent_diff = map(*, shape_diff, flatten(stride(old_layout))) offset = _foldl((i,a)->i+min(a, Zero()), extent_diff, Zero()) return MoYeArray(recast(NewType, pointer(x) + offset * sizeof(OldType)), new_layout) else return MoYeArray(recast(NewType, pointer(x)), new_layout) end end
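# A minimal usage sketch (not part of the original file). The helper name
# `_moyearray_usage_sketch` is made up for illustration and is never called; it only strings
# together the constructors, `view`, and `recast` documented above.
function _moyearray_usage_sketch()
    x = MoYeArray{Int32}(undef, @Layout((4, 4)))  # owning, statically sized array
    fill!(x, Int32(7))
    col = view(x, :, 2)                           # non-owning view of the second column
    y = recast(Int16, x)                          # reinterpret the buffer, here as an 8×4 Int16 array
    return size(col), size(y)                     # expected ((4,), (8, 4))
end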
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
2167
using Base.Broadcast: AbstractArrayStyle, DefaultArrayStyle, Broadcasted using Base.Broadcast: _broadcast_getindex, broadcast_shape import Base.Broadcast: instantiate, materialize!, BroadcastStyle, broadcasted struct MoYeArrayStyle{N, Shape, Stride} <: AbstractArrayStyle{N} end BroadcastStyle(::Type{<:StaticMoYeArray{T,N,E,L}}) where {T,N,E,Shape,Stride,L<:Layout{N,Shape,Stride}} = MoYeArrayStyle{N,Shape,Stride}() BroadcastStyle(a::MoYeArrayStyle, ::DefaultArrayStyle) = a BroadcastStyle(a::MoYeArrayStyle{N,S}, b::MoYeArrayStyle{N,S}) where {N,S} = a @generated function BroadcastStyle(a::MoYeArrayStyle{N1, S1}, b::MoYeArrayStyle{N2, S2}) where {N1, S1, N2, S2} if N1 > N2 return :a elseif N1 < N2 return :b else if map(product, make_tuple(S1)) == map(product, make_tuple(S2)) return :a else throw("Don't know how to broadcast MoYeArrays") end end end # currently only defined for static layouts @inline function Base.similar(bc::Broadcasted{MoYeArrayStyle{N,Shape, Stride}}, ::Type{ElType}) where {ElType, N, Shape, Stride} return MoYeArray{ElType}(undef, make_layout_like(make_layout(make_tuple(Shape), make_tuple(Stride)))) end @inline function Base.copyto!(dest::StaticOwningArray{T,N,L}, bc::Broadcasted{MoYeArrayStyle{N,S}}) where {T,N,S,L<:Layout{N,S}} @gc_preserve copyto!(dest, bc) return dest end # have to define these to avoid ambiguities... @inline broadcasted(f::Function, x::StaticOwningArray) = @gc_preserve broadcasted(f, x) @inline broadcasted(f::Function, x::StaticOwningArray, y::StaticOwningArray) = @gc_preserve broadcasted(f, x, y) @inline broadcasted(f::Function, x::StaticOwningArray, y::Number) = @gc_preserve broadcasted(f, x, y) @inline broadcasted(f::Function, x::Number, y::StaticOwningArray) = @gc_preserve broadcasted(f, x, y) @inline broadcasted(f::Function, x::StaticOwningArray, y::StaticNonOwningArray) = @gc_preserve broadcasted(f, x, y) @inline broadcasted(f::Function, x::StaticNonOwningArray, y::StaticOwningArray) = @gc_preserve broadcasted(f, x, y)
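# A minimal sketch (not part of the original file) of how the broadcast rules above are meant
# to be used. The helper name `_broadcast_sketch` is made up for illustration and is never called.
function _broadcast_sketch()
    a = MoYeArray{Float32}(undef, @Layout((2, 3)))
    b = MoYeArray{Float32}(undef, @Layout((2, 3)))
    fill!(a, 1.0f0)
    fill!(b, 2.0f0)
    c = a .+ b       # out-of-place: allocates a new static MoYeArray via `similar`
    a .= a .* 3.0f0  # in-place: dispatches to the `copyto!` overload defined above
    return c, a
end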
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
4206
""" ViewEngine{T, P} A wrapper of a pointer. `P` is the type of the pointer. """ struct ViewEngine{T, P} ptr::P end @inline function ViewEngine(ptr::Ptr{T}) where {T} return ViewEngine{T, typeof(ptr)}(ptr) end @inline function ViewEngine(ptr::LLVMPtr{T, AS}) where {T, AS} return ViewEngine{T, typeof(ptr)}(ptr) end @inline function ViewEngine(A::AbstractArray) p = LayoutPointers.memory_reference(A)[1] return ViewEngine(p) end @inline function ViewEngine(A::ViewEngine) return A end @inline Base.pointer(A::ViewEngine) = getfield(A, :ptr) @inline function Base.unsafe_convert(p::Type{Ptr{T}}, A::ViewEngine{T}) where {T} return Base.unsafe_convert(p, pointer(A)) end @inline function Base.unsafe_convert(p::Type{LLVMPtr{T, AS}}, A::ViewEngine{T}) where {T, AS} return Base.unsafe_convert(p, pointer(A)) end @inline function Base.getindex(A::ViewEngine{T, <:LLVMPtr{T}}, i::IntType) where {T} align = Base.datatype_alignment(T) return unsafe_load(pointer(A), dynamic(i), Val(align)) end @inline function Base.getindex(A::ViewEngine{T}, i::IntType) where {T} return unsafe_load(pointer(A), dynamic(i)) end @inline function Base.setindex!(A::ViewEngine{T, <:LLVMPtr{T}}, val, i::IntType) where {T} align = Base.datatype_alignment(T) return unsafe_store!(pointer(A), val, dynamic(i), Val(align)) end @inline function Base.setindex!(A::ViewEngine{T, <:Ptr{T}}, val, i::IntType) where {T} return unsafe_store!(pointer(A), val, dynamic(i)) end @inline ManualMemory.preserve_buffer(::ViewEngine) = nothing """ ArrayEngine{T, L} <: DenseVector{T} A owning and mutable vector of type `T` with static length `L`. ## Examples ```julia julia> x = ArrayEngine{Float32}(undef, _3) 3-element ArrayEngine{Float32, 3}: -9.8271385f-36 7.57f-43 -9.8271385f-36 julia> x[1] = 10f0 10.0f0 julia> x 3-element ArrayEngine{Float32, 3}: 10.0 7.57f-43 -9.8271385f-36 ``` """ mutable struct ArrayEngine{T, L} <: DenseVector{T} data::NTuple{L, T} @inline ArrayEngine{T, L}(::UndefInitializer) where {T, L} = new{T, L}() @inline function ArrayEngine{T}(::UndefInitializer, ::StaticInt{L}) where {T, L} return ArrayEngine{T, L}(undef) end @inline ArrayEngine(data::NTuple{L, T}) where {T, L} = new{T, L}(data) end @inline function Base.unsafe_convert(::Type{Ptr{T}}, A::ArrayEngine{T}) where {T} return Base.unsafe_convert(Ptr{T}, pointer_from_objref(A)) end @inline function Base.pointer(A::ArrayEngine{T}) where {T} return Base.unsafe_convert(Ptr{T}, pointer_from_objref(A)) end #@device_override @inline function Base.pointer(A::ArrayEngine{T}) where {T} # return Base.bitcast(LLVMPtr{T, AS.Generic}, pointer_from_objref(A)) #end @inline Base.size(::ArrayEngine{T, L}) where {T, L} = (L,) @inline Base.length(::ArrayEngine{T, L}) where {T, L} = L @inline Base.length(::Type{ArrayEngine{T, L}}) where {T, L} = L @inline Base.similar(::ArrayEngine{T, L}) where {T, L} = ArrayEngine{T, L}(undef) @generated function Base.similar(A::ArrayEngine, ::Type{T}) where {T} return quote Base.@_inline_meta return ArrayEngine{T}(undef, $(StaticInt{length(A)}())) end end @inline function ArrayEngine{T}(f::Function, ::StaticInt{L}) where {T, L} # not very useful A = ArrayEngine{T, L}(undef) @loopinfo unroll for i in eachindex(A) @inbounds A[i] = f(i) end return A end @inline function ManualMemory.preserve_buffer(A::ArrayEngine) return ManualMemory.preserve_buffer(getfield(A, :data)) end Base.@propagate_inbounds function Base.getindex(A::ArrayEngine, i::IntType) @boundscheck checkbounds(A, i) b = ManualMemory.preserve_buffer(A) GC.@preserve b begin @inbounds ViewEngine(pointer(A))[i] 
end end Base.@propagate_inbounds function Base.setindex!(A::ArrayEngine, val, i::IntType) @boundscheck checkbounds(A, i) b = ManualMemory.preserve_buffer(A) GC.@preserve b begin @inbounds ViewEngine(pointer(A))[i] = val end end const Engine{T} = Union{ViewEngine{T}, ArrayEngine{T}}
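# A minimal sketch (not part of the original file) contrasting the two engines: an
# `ArrayEngine` owns its buffer, while a `ViewEngine` only wraps a pointer. The helper name
# `_engine_sketch` is made up for illustration and is never called.
function _engine_sketch()
    owned = ArrayEngine{Float32}(undef, static(4))   # owning, statically sized buffer
    v = [1.0, 2.0, 3.0]
    GC.@preserve v begin
        e = ViewEngine(pointer(v))                   # non-owning wrapper around a raw pointer
        e[2] = 20.0                                  # unsafe_store! through the pointer
        return length(owned), e[2]                   # expected (4, 20.0)
    end
end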
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
9572
const IntSequence{N} = NTuple{N, Union{IntType, StaticInt}} # the recursive type definition is tricky to get right, we put Tuple here to represent it. const IntTuple{N} = Tuple{Vararg{Union{IntType, Tuple}, N}} const GenIntTuple = Union{Int, StaticInt, IntTuple} # note that this type is only **almost** static const StaticIntTuple{N} = Tuple{ Vararg{ Union{StaticInt, Tuple{Vararg{Union{StaticInt, Tuple}}}}, N}} const GenStaticIntTuple = Union{StaticInt, StaticIntTuple} Base.@propagate_inbounds function Base.getindex(@nospecialize(x::Tuple), @nospecialize(I::IntSequence{N})) where {N} return map(Base.Fix1(getindex, x), I) end @inline fmap(f::Function, @nospecialize(t::Tuple)) = map(Base.Fix1(fmap, f), t) @inline fmap(f::Function, x) = f(x) @generated function fmap(f::Function, @nospecialize(t0::Tuple), t1) expr = Expr(:tuple) for i in 1:length(t0.parameters) push!(expr.args, :(fmap(f, t0[$i], t1[$i]))) end return expr end @inline fmap(f::F, t0, t1) where F = f(t0, t1) @inline rank(@nospecialize x::IntTuple) = nfields(x) @inline rank(@nospecialize x::IntType) = one(x) @inline rank(@nospecialize(x::IntTuple), I::IntType...) = rank(getindex(x, I...)) @generated function rank(::Type{T}) where {T<:Tuple} return quote Base.@_inline_meta return $(length(T.parameters)) end end @inline depth(@nospecialize x::IntType) = zero(x) function depth(@nospecialize x::IntTuple) return max(map(depth, x)...) + One() end @inline shape(x::Tuple) = map(shape, x) @inline shape(x) = x @inline product(x::IntType) = x @inline product(x::IntSequence) = prod(x) @inline product(x::IntTuple) = prod(flatten(x)) @generated function product(::Type{T}) where {T<:StaticIntTuple} return quote Base.@_inline_meta return $(typeof(prod(flatten(make_tuple(T))))) end end @inline product_each(x) = map(product, x) @inline product_like_helper(a,b) = product(b) @inline product_like(x, g) = fmap(product_like_helper, g, x) @inline capacity(x::IntType) = x @inline capacity(@nospecialize x::IntTuple) = product(x) @inline capacity(::Type{T}) where {T<:StaticInt} = T @inline capacity(::Type{T}) where {T<:StaticIntTuple} = product(T) @inline flatsum(@nospecialize x::IntTuple) = sum(flatten(x)) function inner_product(@nospecialize(x::IntSequence), @nospecialize(y::IntSequence)) return sum(map(*, x, y)) end function inner_product(@nospecialize(x::IntTuple), @nospecialize(y::IntTuple)) return sum(map(inner_product, x, y)) end Base.cld(@nospecialize(x::IntSequence), @nospecialize(y::IntSequence)) = map(cld, x, y) function Base.cld(x::IntTuple, y::IntTuple) @assert rank(x) >= rank(y) y = append(y, One(), StaticInt{rank(x)}()) return map(cld, x, y) end function shape_div(a::IntType, b::IntType) return a ÷ b != 0 ? 
a ÷ b : sign(a) * sign(b) end @generated function shape_div(::StaticInt{N}, ::StaticInt{M}) where {N, M} @assert N % M == 0 || M % N == 0 "Cannot divide $(N) by $(M) or vice versa" return :($(StaticInt{shape_div(N, M)}())) end function shape_div(@nospecialize(a::IntType), @nospecialize(b::IntTuple)) return shape_div(a, product(b)) end function shape_div(@nospecialize(a::IntTuple), b::IntType) result, _ = _foldl((init, ai) -> (append(init[1], shape_div(ai, init[2])), shape_div(init[2], ai)), a, ((), b)) return result end function shape_div(a::IntTuple{N}, b::IntTuple{N}) where {N} return map(shape_div, a, b) end function elem_scale(x::IntType, y) @inline return x * capacity(y) end function elem_scale(x::IntTuple{N}, y::IntTuple{N}) where {N} return map(elem_scale, x, y) end @generated function iscongruent(x, y) :($(==(repeat_like(x, Zero()), repeat_like(y, Zero())))) end # Any coordinate into A can also be used as a coordinate into B @inline function iscompatible(@nospecialize(a::Tuple), @nospecialize(b::Tuple)) return length(a)==length(b) && all(map(iscompatible, a, b)) end @inline iscompatible(a::IntType, b::GenIntTuple) = a == capacity(b) @inline iscompatible(a::Tuple, b::IntType) = false # Replace the elements of Tuple B that are paired with 0 in A with 1 @inline filter_zeros(a::Zero, x) = One() @inline filter_zeros(a::IntType, x) = x function filter_zeros(x::IntTuple{N}, y::IntTuple{N}) where {N} return map(filter_zeros, x, y) end filter_zeros(t::Tuple) = filter_zeros(t, t) function slice(A::Tuple, index::Tuple) length(A) == length(index) || throw(DimensionMismatch("Array and index must have the same rank")) return tuple_cat(map(slice, A, index)...) end function slice(A, index::Colon) @inline return tuple(A) end function slice(A, index::IntType) @inline return () end function dice(@nospecialize(A::Tuple), @nospecialize(index::Tuple)) length(A) == length(index) || throw(DimensionMismatch("Array and index must have the same rank")) return tuple_cat(map(dice, A, index)...) end function dice(A, index::Colon) @inline return () end function dice(A, index::IntType) @inline return tuple(A) end function make_int_tuple(N::IntType, t, n::IntType, init::IntType) ntuple(N) do i return i ≤ n ? t[i] : init end end # fill_int_tuple_from # make_int_tuple_from #function to_array(::Type{T}, @nospecialize(x::IntTuple)) where {T} # x = flatten(x) # N = length(x) # result = Array{T}(undef, N) # ntuple(N) do i # @inbounds result[i] = x[i] # end # return result #end # comparison # Base.:(<)(x::Int, y::Tuple) = x < product(y)? 
# maybe we need this for non-congruent shapes
#lex_less = <
#lex_leq = <=
#lex_geq = >=

colex_less(x::IntType, y::IntType) = x < y
colex_less(::Tuple{}, ::Tuple{}) = false
colex_less(::Tuple{}, ::Tuple) = true
colex_less(::Tuple, ::Tuple{}) = false
function colex_less(@nospecialize(t1::Tuple), @nospecialize(t2::Tuple))
    a, b = last(t1), last(t2)
    if a ≠ b
        return colex_less(a, b)
    end
    return colex_less(Base.front(t1), Base.front(t2))
end

elem_less(x::IntType, y::IntType) = x < y
elem_less(::Tuple{}, ::Tuple{}) = true
elem_less(::Tuple{}, ::Tuple) = true   # TupleA is exhausted
elem_less(::Tuple, ::Tuple{}) = false  # TupleA is not exhausted, TupleB is exhausted
function elem_less(@nospecialize(t1::Tuple), @nospecialize(t2::Tuple))
    a, b = first(t1), first(t2)
    if length(t1) == length(t2) == 1
        return a < b
    end
    if !((a == b) || elem_less(a, b))
        return false
    end
    return elem_less(Base.tail(t1), Base.tail(t2))
end

elem_leq(x, y) = !elem_less(y, x)
elem_gtr(x, y) = elem_less(y, x)
elem_geq(x, y) = !elem_less(x, y)

@inline function increment(coord::IntType, shape::IntType)
    return ifelse(coord < shape, coord + one(coord), one(coord))
end
function increment(coord, shape)
    c, s = first(coord), first(shape)
    if length(coord) == length(shape) == 1
        return increment(c, s)
    end
    if c != s
        return (increment(c, s), Base.tail(coord)...)
    end
    return (repeat_like(s, 1), increment(Base.tail(coord), Base.tail(shape))...)
end

# iterator
struct ForwardCoordUnitRange{N, B, E} <: AbstractUnitRange{Int}
    start::B
    stop::E
    function ForwardCoordUnitRange(start::IntTuple{N}, stop::IntTuple{N}) where {N}
        return new{N, typeof(start), typeof(stop)}(start, stop)
    end
end

function HierIndices(shape::IntTuple)
    start = repeat_like(shape, 1)
    return ForwardCoordUnitRange(start, shape)
end

Base.first(x::ForwardCoordUnitRange) = getfield(x, :start)
Base.last(x::ForwardCoordUnitRange) = getfield(x, :stop)
Base.length(x::ForwardCoordUnitRange) = length(getfield(x, :stop))

function Base.iterate(x::ForwardCoordUnitRange)
    start = getfield(x, :start)
    return (start, start)
end
function Base.iterate(x::ForwardCoordUnitRange, state)
    stop = getfield(x, :stop)
    if state == stop
        return nothing
    end
    new_state = increment(state, stop)
    return (new_state, new_state)
end

make_tuple(::Type{StaticInt{N}}) where {N} = StaticInt{N}()
make_tuple(::Type{Colon}) = Colon()
@generated function make_tuple(::Type{S}) where {S<:Tuple}
    expr = Expr(:tuple)
    for p in S.parameters
        push!(expr.args, make_tuple(p))
    end
    return expr
end
make_tuple(::Type{S}) where {S} = S()

function Base.getindex(x::StaticInt, i::Integer)
    @inline
    @boundscheck i == 1 || throw(BoundsError(x, i))
    x
end
@inline Base.getindex(x::StaticInt, ::StaticInt{1}) = x
# @inline Base.getindex(x::StaticInt, ::StaticInt{N}) where {N} = throw(BoundsError(x, N))

"""
Static `findfirst`. Returns a `StaticInt`, or `N+1` if not found. Specialized on `f`.
"""
function static_findfirst(::G, t::Tuple, ::Tuple{}) where {G}
    @inline
    return StaticInt{length(t)+1}()
end
function static_findfirst(f::G, t::Tuple, I::Tuple) where {G}
    return (@inline; f(t[first(I)])) ? first(I) : static_findfirst(f, t, Base.tail(I))
end
static_findfirst(f::G, t::StaticInt) where {G} = ifelse(f(t), One(), _2)
static_findfirst(f::G, t::Tuple) where {G} = static_findfirst(f, t, ntuple(static, length(t)))
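# A minimal sketch (not part of the original file) of the hierarchical IntTuple helpers defined
# above. The helper name `_int_tuple_sketch` is made up for illustration and is never called;
# the values in the comments are the expected results.
function _int_tuple_sketch()
    s = ((2, 3), 4)                  # a nested shape with two top-level modes
    c = capacity(s)                  # 24: product over all leaves
    d = depth(s)                     # 2: nesting depth of the tuple
    nxt = increment((2, 2), (2, 3))  # (1, 3): column-major coordinate increment with wrap
    return c, d, nxt
end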
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
39708
""" Layout{N, Shape, Stride} A `Layout` is a pair of `Shape` and `Stride` tuples. The `Shape` tuple contains the number of elements in each dimension, and the `Stride` tuple contains the number of elements to skip to get to the next element in each dimension. ## Fields - `shape`. - `stride`. ## Indexing A `Layout` can be indexed with three types of indices: - `Int`: a linear index in a column-major order. - `IntTuple`: a hierarchical index. It has the exact hierarchical structure as defined by the `Shape`. - `IntTuple`: a congruent index. A tuple of `N` mixes hierarchical and linear indices along each dimension. ## Examples ```julia julia> layout = Layout((4, (2, 2)), (2, (1, 8))); julia> print_layout(ans) (4, (2, 2)):(2, (1, 8)) 1 2 3 4 +----+----+----+----+ 1 | 1 | 2 | 9 | 10 | +----+----+----+----+ 2 | 3 | 4 | 11 | 12 | +----+----+----+----+ 3 | 5 | 6 | 13 | 14 | +----+----+----+----+ 4 | 7 | 8 | 15 | 16 | +----+----+----+----+ julia> layout(6) # linear index 4 julia> layout((2,2)) # hierarchical index 4 julia> layout((2,(2,1))) # congruent index 4 ``` """ struct Layout{N, Shape, Stride} shape::Shape stride::Stride # shape and stride must be congruent function Layout(shape::IntTuple{N}, stride::IntTuple{N}) where {N} return new{N, typeof(shape), typeof(stride)}(shape, stride) end function Layout(shape::IntType, stride::IntType) return new{1, typeof(shape), typeof(stride)}(shape, stride) end end """ A tuple of `Layout`s, `Colon`s, integers, or tiles. """ const Tile{N} = Tuple{ Vararg{ Union{Colon, Layout, Int, StaticInt, Tuple{Vararg{Union{Colon, Layout, Int, StaticInt}}}}, N}} const StaticLayout{N, S, R} = Layout{N, S, R } where {S <: Union{StaticInt, StaticIntTuple{N}}, R <: Union{StaticInt, StaticIntTuple{N}}} @inline function StaticLayout{N, S, R}() where {N, S <: StaticIntTuple{N}, R <: StaticIntTuple{N}} return Layout(make_tuple(S), make_tuple(R)) end shape(l::Layout) = getfield(l, :shape) shape(l::Layout{1}, ::One) = shape(l) shape(l, i::IntType) = getindex(shape(l), i) shape(l::Type{<:StaticLayout{N, S, R}}) where {N, S, R} = S Base.stride(l::Layout) = getfield(l, :stride) Base.stride(l::Layout{1}, ::One) = stride(l) Base.stride(l::Layout, i::IntType) = getindex(stride(l), i) Base.stride(l::Type{<:StaticLayout{N, S, R}}) where {N, S, R} = R Static.static(l::Layout) = make_layout(static(shape(l)), static(stride(l))) Static.is_static(l::Layout) = dynamic(is_static(shape(l))) && dynamic(is_static(stride(l))) # map a logical coordinate to a linear index function (l::Layout)(@nospecialize coord::IntType) return coord_to_index(coord, shape(l), stride(l)) end function (l::Layout)(@nospecialize coord::IntTuple) return coord_to_index(coord, shape(l), stride(l)) end @generated function (l::Layout)(coord) # coord is fixed with colon @assert hascolon(coord) return :(slice(l, coord)) end function (l::Layout)(c1, c2, c3...) return l((c1, c2, c3...)) end # map 1D index to a hier coordinate function get_hier_coord(l::Layout, @nospecialize index::Union{Integer, StaticInt}) @inline return index_to_coord(index, l.shape, l.stride) end """ get_congr_coord(l::Layout, index::Integer) Get the flat congruent coordinate from the physical index `index`. 
""" function get_congr_coord(l::Layout{N}, @nospecialize index::Union{Integer, StaticInt}) where {N} @inline return coord_to_coord(get_hier_coord(l, index), l.shape, ntuple(Returns(One()), Val(N))) end function get_linear_coord(l::Layout, @nospecialize index::Union{Integer, StaticInt}) @inline return coord_to_index(get_hier_coord(l, index), l.shape) end """ make_layout(shape::IntTuple, stride::IntTuple) make_layout(shape::IntTuple, major=GenColMajor) Construct a layout with the given shape and stride. If the stride is not given, it is set to col-major compact stride. See alse [`GenColMajor`](@ref) and [`GenRowMajor`](@ref). """ function make_layout(shape::IntTuple, stride::IntTuple) @inline return Layout(shape, stride) end function make_layout(shape::IntType, stride::IntType) @inline return Layout(shape, stride) end function make_layout(shape::GenIntTuple) @inline return Layout(shape, compact_col_major(shape)) end """ make_layout(::Layouts...) Concatenate layouts into a single layout. """ function make_layout(layouts::Layout...) return make_layout(map(shape, layouts), map(stride, layouts)) # concatenation end function make_layout(shape::GenIntTuple, ::Type{GenColMajor}) return make_layout(shape, compact_col_major(shape)) end function make_layout(shape::GenIntTuple, ::Type{GenRowMajor}) return make_layout(shape, compact_row_major(shape)) end function make_layout(shape::Type{S}, stride::Type{D}) where {N, S <: IntTuple{N}, D <: IntTuple{N}} @inline return Layout{N, S, D} end function make_layout(shape::Type{S}, stride::Type{D}) where {S <: StaticInt, D <: StaticInt} @inline return Layout{1, S, D} end """ cat(::Layouts...) Concatenate layouts into a single layout. """ function Base.cat(layouts::Layout...) return make_layout(map(shape, layouts), map(stride, layouts)) end """ @Layout(shape, stride=nothing) Construct a static layout with the given shape and stride. ## Arguments - `shape`: a tuple of integers or a single integer - `stride`: a tuple of integers, a single integer, [`GenColMajor`](@ref) or [`GenRowMajor`](@ref) """ macro Layout(expr1, expr2=nothing) if expr2 === nothing layout_call = :(make_layout(static($expr1))) elseif expr2 isa Symbol layout_call = :(make_layout(static($expr1), $expr2)) else expr2 layout_call = :(make_layout(static($expr1), static($expr2))) end return layout_call end """ make_ordered_layout(shape, order) make_ordered_layout(layout) Construct a compact layout with the given shape and the stride is following the given order. ## Examples ```julia julia> MoYe.make_ordered_layout((3, 5), (2, 6)) (3, 5):(_1, 3) julia> MoYe.make_ordered_layout((3, 5), (10, 2)) (3, 5):(5, _1) ``` """ function make_ordered_layout(shape, order) # The arguments may be static, which is not handled return make_layout(shape, compact_order(shape, order)) end function make_ordered_layout(layout::Layout) return make_ordered_layout(shape(layout), stride(layout)) end function make_layout_like(layout::Layout,) return make_layout(shape(layout), compact_order(filter_zeros(stride(layout), shape(layout)), stride(layout))) end # make_identity_layout """ getindex(layout::Layout, Is...) Get the sub-layout of `layout` with the given indices. """ function Base.getindex(layout::Layout, Is::IntType...) 
@inline return make_layout(getindex(shape(layout), Is...), getindex(stride(layout), Is...)) end function Base.getindex(@nospecialize(t::Layout), r::AbstractUnitRange) @inline return ntuple(i -> t[i + first(r) - 1], length(r)) end function Base.getindex(layout::Type{<:Layout}, ::StaticInt{I}) where {I} @inline return make_layout(shape(layout).parameters[I], stride(layout).parameters[I]) end # Layout as iterator function Base.firstindex(l::Layout) return 1 end function Base.lastindex(l::Layout) return rank(l) end function Base.first(l::Layout) return l[1] end function Base.last(l::Layout{N}) where {N} return l[N] end function Base.length(l::Layout{N}) where {N} return N end function Base.iterate(l::Layout) return l[1], 1 end function Base.iterate(x::Layout{N}, state) where {N} if state == N return nothing end new_state = state + 1 return (x[new_state], new_state) end """ flatten(layout::Layout) Remove the hierarchy of the layout and make it a flat layout. ## Examples ```julia julia> layout = make_layout(((4, 3), 1), ((3, 1), 0)) ((4, 3), 1):((3, 1), 0) julia> print(flatten(layout)) (4, 3, 1):(3, 1, 0) ``` """ function flatten(layout::Layout) return make_layout(flatten(shape(layout)), flatten(stride(layout))) end """ size(::Layout) size(::Layout, i::Union{Int, StaticInt}) Get the cardinality of the domain of the layout. See also [`cosize`](@ref). """ function Base.size(layout::Layout) return capacity(shape(layout)) end function Base.size(layout::Layout, i::IntType) return capacity(shape(layout)[i]) end function Base.size(layout::Type{<:StaticLayout}) return capacity(shape(layout)) end function Base.size(layout::Type{<:StaticLayout}, ::StaticInt{I}) where {I} return capacity(shape(layout).parameters[I]) end """ rank(::Layout) rank(::Layout, i::Union{Int, StaticInt}) Get the rank, i.e., the dimensionality, of the layout. """ function rank(layout::Layout) return rank(shape(layout)) end function rank(layout::Layout, i::Int) return rank(shape(layout)[i]) end function rank(::Type{<:Layout{N}}) where {N} return N end """ depth(::Layout) depth(::Layout, i::Union{Int, StaticInt}) Get the depth of the hierarchy of the layout. For example, the depth of `(1,2)` is 1, and the depth of `((1,2),3)` is 2. """ function depth(layout::Layout) return depth(shape(layout)) end function depth(layout::Layout, i::IntType) return depth(shape(layout)[i]) end """ cosize(::Layout) cosize(::Layout, i::Union{Int, StaticInt}) Get the cardinality of the codomain of the layout. See also [`size`](@ref). 
""" function cosize(layout::Layout) return layout(size(layout)) ## Note: negative stride is not supported end function coord_to_index(layout::Layout, coord) return coord_to_index(coord, shape(layout), stride(layout)) end function coord_to_index0(layout::Layout, coord) return coord_to_index0(coord, shape(layout), stride(layout)) end @inline iscompatible(a::Layout, b::Layout) = iscompatible(shape(a), shape(b)) function slice(layout::Layout, coord) return make_layout(slice(shape(layout), coord), slice(stride(layout), coord)) end function slice_and_offset(layout::Layout, coord) idx = coord_to_index(layout, coord) return slice(layout, coord), (idx - one(idx)) end function dice(layout::Layout, coord) return make_layout(dice(shape(layout), coord), dice(stride(layout), coord)) end function append(layout::Layout{N}, ::Layout, ::StaticInt{N}) where {N} return layout end function append(layout::Layout{N}, ::StaticInt{N}) where {N} return layout end function append(layout::Layout, x::Layout, N::StaticInt) return make_layout(append(shape(layout), shape(x), N), append(stride(layout), stride(x), N)) end function append(layout::Layout, N::StaticInt) return append(layout, @Layout(1, 0), N) end function prepend(layout::Layout, x::Layout, N::StaticInt) return make_layout(prepend(shape(layout), shape(x), N), prepend(stride(layout), stride(x), N)) end function prepend(layout::Layout, N::StaticInt) return prepend(layout, @Layout(1, 0), N) end function replace(layout::Layout, x::Layout, N::IntType) return make_layout(replace(shape(layout), shape(x), N), replace(stride(layout), stride(x), N)) end function group(layout::Layout, B::IntType, E::IntType) return make_layout(group(shape(layout), B, E), group(stride(layout), B, E)) end @generated function transform_layout(f, t1, t2) R1 = rank(t1) R2 = rank(t2) expr = Expr(:call, :make_layout) for i in 1:min(R1, R2) push!(expr.args, Expr(:call, :f, :(t1[$i]), :(t2[$i]))) end if R1 < R2 for i in (R1 + 1):R2 push!(expr.args, :(t2[$i])) end elseif R1 > R2 for i in (R2 + 1):R1 push!(expr.args, :(t1[$i])) end end return quote Base.@_inline_meta $expr end end @generated function transform_layout(f, t1, t2, ::StaticInt{N}) where {N} expr = Expr(:call, :make_layout) for i in 1:N push!(expr.args, Expr(:call, :f, :(t1[$i]), :(t2[$i]))) end return expr end @generated function bw_coalesce(::StaticInt{I}, old_shape, old_stride, new_shape, new_stride) where {I} expr = Expr(:block) for i in I:-1:1 if old_shape.parameters[i] == One continue elseif new_shape == One new_shape = old_shape.parameters[i] new_stride = old_stride.parameters[i] push!(expr.args, :(new_shape = old_shape[$(static(i))])) push!(expr.args, :(new_stride = old_stride[$(static(i))])) elseif front(new_shape)<:StaticInt && old_shape.parameters[i]<:StaticInt && old_stride.parameters[i]<:StaticInt && front(new_stride)<:StaticInt && old_shape.parameters[i] * old_stride.parameters[i] == front(new_stride) new_shape = replace_front(new_shape, old_shape.parameters[i] * front(new_shape)) new_stride = replace_front(new_stride, old_stride.parameters[i]) push!(expr.args, :(new_shape = replace_front(new_shape, old_shape[$(static(i))] * front(new_shape)))) push!(expr.args, :(new_stride = replace_front(new_stride, old_stride[$(static(i))]))) else new_shape = prepend(new_shape, old_shape.parameters[i]) new_stride = prepend(new_stride, old_stride.parameters[i]) push!(expr.args, :(new_shape = prepend(new_shape, old_shape[$(static(i))]))) push!(expr.args, :(new_stride = prepend(new_stride, old_stride[$(static(i))]))) end end return quote 
$expr if isa(new_shape,StaticInt) && new_shape == One() return Layout(One(), Zero()) else return Layout(new_shape, new_stride) end end end """ coalesce(layout::Layout) Coalesce the layout by merging adjacent dimensions with stride 1. ## Examples ```julia julia> layout = @Layout (2, (1, 6)) (1, (6, 2)) (_2, (_1, _6)):(_1, (_6, _2)) julia> print(coalesce(layout)) _12:_1 ``` """ function Base.coalesce(layout::Layout) flat_shape = flatten(shape(layout)) flat_stride = flatten(stride(layout)) return bw_coalesce(static(rank(flat_shape) - 1), flat_shape, flat_stride, last(flat_shape), last(flat_stride)) end function Base.coalesce(layout::Layout, @nospecialize trg_profile::IntTuple) # respect the target profile @assert rank(trg_profile) <= rank(layout) return transform_layout(coalesce, layout, trg_profile) end function Base.coalesce(layout::Layout, trg_profile) return coalesce(layout) end function filter_zeros(l::Layout) return make_layout(filter_zeros(stride(l), shape(l)), stride(l)) end function Base.filter(l::Layout) return coalesce(filter_zeros(l)) end # shortcuts function composition(lhs_shape::IntType, lhs_stride::IntType, rhs_shape::IntType, rhs_stride::StaticInt{0}) return Layout(rhs_shape, rhs_stride) end # Base case a:b ∘ c:d = c:(b*d) function composition(lhs_shape::IntType, lhs_stride::IntType, rhs_shape::IntType, rhs_stride::IntType) result_stride = lhs_stride * rhs_stride return Layout(rhs_shape, result_stride) end function composition(lhs_shape::Tuple, lhs_stride::Tuple, rhs_shape::IntType, rhs_stride::StaticInt{1}) result_shape_0 = Base.front(lhs_shape) result_shape_1, rest_shape = _foldl((init, si) -> (append(init[1], min(abs(si), init[2])), shape_div(init[2], abs(si))), result_shape_0, ((), rhs_shape)) return bw_coalesce(static(rank(lhs_shape) - 1), result_shape_1, lhs_stride, rest_shape, last(lhs_stride)) end function composition(lhs_shape::Tuple, lhs_stride::Tuple, rhs_shape::IntType, rhs_stride::StaticInt{0}) return Layout(rhs_shape, rhs_stride) end function composition(lhs_shape::Tuple, lhs_stride::Tuple, rhs_shape::IntType, rhs_stride::IntType) result_shape_0 = Base.front(lhs_shape) result_stride_0 = Base.front(lhs_stride) result_shape_1, rest_stride = _foldl((init, di) -> (append(init[1], shape_div(di, init[2])), shape_div(init[2], di)), result_shape_0, ((), rhs_stride)) result_stride_1 = elem_scale(result_stride_0, shape_div(result_shape_0, result_shape_1)) result_shape_2, rest_shape = _foldl((init, si) -> (append(init[1], min(abs(si), init[2])), shape_div(init[2], abs(si))), result_shape_1, ((), rhs_shape)) return bw_coalesce(static(rank(lhs_shape) - 1), result_shape_2, result_stride_1, rest_shape, rest_stride * last(lhs_stride)) end # distributivity with concatenation @generated function composition(lhs_shape, lhs_stride, rhs_shape::IntTuple{N}, rhs_stride::IntTuple{N}) where {N} expr = Expr(:call, :make_layout) for i in 1:N push!(expr.args, :(MoYe.composition(lhs_shape, lhs_stride, rhs_shape[$i], rhs_stride[$i]))) end return expr end """ composition(l1::Layout, l2::Layout) Compose two layouts as composing two functions. You can use `∘` operator as well. 
## Examples ```julia julia> make_layout(20, 2) ∘ make_layout((4, 5), (1, 4)) (4, 5):(2, 8) julia> make_layout(20, 2) ∘ make_layout((4, 5), (5, 1)) (4, 5):(10, 2) ``` """ function composition(lhs::Layout, rhs::Layout) flat_shape = flatten(shape(lhs)) flat_stride = flatten(stride(lhs)) return composition(flat_shape, flat_stride, shape(rhs), stride(rhs)) end @generated function composition(lhs::Layout, rhs::Tuple) @assert rank(rhs) <= rank(lhs) return :(transform_layout(composition, lhs, rhs, $(static(rank(rhs))))) end function composition(lhs::Layout, rhs::Colon) return lhs end function composition(lhs::Layout, rhs) return composition(lhs, make_layout(rhs)) end function Base.:(∘)(l1::Layout, l2) return composition(l1, l2) end function withshape(l::Layout, shape::GenIntTuple) return composition(l, make_layout(shape)) end function withshape(l::Layout, s1, s2, s3...) return composition(l, make_layout((s1, s2, s3...))) end function _complement_inner(init, i) shape, stride, result_shape, result_stride = init min_stride = Static.reduce_tup(min, stride) min_idx = static_findfirst(==(min_stride), stride) new_shape = min_stride ÷ result_stride[i] new_stride = min_stride * shape[min_idx] @assert !iszero(new_shape) return (remove(shape, min_idx), remove(stride, min_idx), append(result_shape, new_shape), append(result_stride, new_stride)) end function _complement(shape::IntType, stride::StaticInt{0}, cotarget::IntType) return make_layout(coalesce(cotarget)) end function _complement(shape::IntType, stride::IntType, cotarget::IntType) return _complement(tuple(shape), tuple(stride), cotarget) end function _complement(shape::IntTuple{R}, stride::StaticIntTuple{R}, cotarget::IntType) where {R} shape_, stride_, result_shape, result_stride = _foldl(_complement_inner, ntuple(identity,Val(R - 1)), (shape, stride, (), (One(),))) new_shape = stride_[1] ÷ back(result_stride) @assert !iszero(new_shape) result_shape = append(result_shape, new_shape) new_stride = shape_[1] * stride_[1] rest_shape = coalesce(cld(cotarget, new_stride)) rest_stride = compact_col_major(rest_shape, new_stride) return coalesce(make_layout((result_shape, rest_shape), (result_stride, rest_stride))) end """ complement(l::Layout, cosize::IntType) A complement layout of `A` is a layout `B` such that `(A, B)` is a compact layout of size `cosize`. """ function complement(l::Layout, cotarget::IntType) filter_layout = filter(l) return _complement(shape(filter_layout), stride(filter_layout), cotarget) end function complement(l::Layout) filter_layout = filter(l) return _complement(shape(filter_layout), stride(filter_layout), cosize(filter_layout)) end @generated function inverse_seq(shape::StaticIntTuple{N}, stride::StaticIntTuple{N}, I::StaticInt{II}) where {II, N} if N < II return :((Is)) else next_stride = stride.parameters[II] * shape.parameters[II] next_idx = static_findfirst(==(next_stride), tuple(stride.parameters...)) Is = (I(),) while N >= dynamic(next_idx) Is = (Is..., next_idx) next_stride = stride.parameters[dynamic(next_idx)] * shape.parameters[dynamic(next_idx)] next_idx = static_findfirst(==(next_stride), tuple(stride.parameters...)) end return :($Is) end end # need this specialization to avoid type instability... 
Base.@assume_effects :total function inverse_seq(shape, stride, I::StaticInt)
    length(shape) < I && return ()
    @inbounds next_stride = stride[I] * shape[I]
    if isa(next_stride, StaticInt)
        next_idx = static_findfirst(==(next_stride), stride)
        return inverse_seq(shape, stride, next_idx, I)
    else
        return tuple(I)
    end
end
function inverse_seq(shape, stride, I::StaticInt, I′::StaticInt, Is::Vararg{StaticInt, N}) where {N}
    length(shape) < I && return (I′, Is...)
    @inbounds next_stride = stride[I] * shape[I]
    if isa(next_stride, StaticInt)
        next_idx = static_findfirst(==(next_stride), stride)
        return inverse_seq(shape, stride, next_idx, I′, Is..., I)
    else
        return (I′, Is..., I)
    end
end

@inline right_inverse(x::Colon) = x

"""
    right_inverse(layout::Layout)

Return the right inverse of `layout`, i.e. a layout `layout′` such that
`(layout ∘ layout′)(i) == (i)`. The domain of `layout′` is chosen to be the maximum
continuous sequence of the codomain of `layout`.
"""
function right_inverse(layout::Layout)
    flat_layout = coalesce(layout)
    astride = map(abs, flat_layout.stride)
    next_I = findfirst(Base.Fix1(===, One()), astride)
    isnothing(next_I) && return @Layout(1, 0)
    iseq = inverse_seq(flat_layout.shape, astride, static(next_I))
    isempty(iseq) && return @Layout(1, 0)
    rstride = compact_col_major(flat_layout.shape)
    return make_layout(unwrap(map(Base.Fix1(shape, flat_layout), iseq)),
                       unwrap(map(i -> sign(Base.Fix1(stride, flat_layout)(i)) * Base.Fix1(getindex, rstride)(i), iseq)))
end

"""
    left_inverse(layout::Layout)

Return the left inverse of `layout`, i.e. a layout `layout′` such that
`(layout′ ∘ layout)(i) == (i)`. The domain of `layout′` is chosen to be the maximum
continuous sequence of the domain of `layout`.
"""
left_inverse(layout::Layout) = right_inverse(make_layout(layout, complement(layout)))
left_inverse(::Colon) = Colon()

function max_common_layout(a::Layout, b::Layout)
    inv_b = right_inverse(b)
    common = coalesce(composition(a, inv_b))
    if is_static(shape(common, 1)) && (stride(common, 1) == One())
        return composition(inv_b, common[1])
    else
        return @Layout 1 0
    end
end

function max_common_vector(a::Layout, b::Layout)
    inv_b = right_inverse(b)
    common = coalesce(composition(a, inv_b))
    if known(is_static(shape(common, 1))) && (stride(common, 1) == One())
        return shape(common, 1)
    else
        return One()
    end
end

function _zip(layout::Layout)
    return make_layout(_zip(shape(layout)), _zip(stride(layout)))
end
# this is equivalent to make_layout(map(make_layout, l1, l2)...)
function _zip(layoutA::Layout, layoutB::Layout)
    return make_layout(_zip(shape(layoutA), shape(layoutB)), _zip(stride(layoutA), stride(layoutB)))
end

@inline function transpose(l::Layout{2})
    return make_layout(l[2], l[1])
end

function tile_unzip(layout::Layout, tile)
    return make_layout(zip2_by(shape(layout), tile), zip2_by(stride(layout), tile))
end

"""
    logical_product(A::Layout, B::Layout)

Compute the logical product of two layouts. Indexing through the first mode of the resulting
layout corresponds to indexing through `A` and indexing through the second mode corresponds
to indexing through `B`.
```julia julia> tile = @Layout((2, 2), (1, 2)); julia> print_layout(tile) (_2, _2):(_1, _2) 1 2 +---+---+ 1 | 1 | 3 | +---+---+ 2 | 2 | 4 | +---+---+ julia> matrix_of_tiles = @Layout((3, 4), (4, 1)); julia> print_layout(matrix_of_tiles) (_3, _4):(_4, _1) 1 2 3 4 +----+----+----+----+ 1 | 1 | 2 | 3 | 4 | +----+----+----+----+ 2 | 5 | 6 | 7 | 8 | +----+----+----+----+ 3 | 9 | 10 | 11 | 12 | +----+----+----+----+ julia> print_layout(logical_product(tile, matrix_of_tiles)) ((_2, _2), (_3, _4)):((_1, _2), (_16, _4)) 1 2 3 4 5 6 7 8 9 10 11 12 +----+----+----+----+----+----+----+----+----+----+----+----+ 1 | 1 | 17 | 33 | 5 | 21 | 37 | 9 | 25 | 41 | 13 | 29 | 45 | +----+----+----+----+----+----+----+----+----+----+----+----+ 2 | 2 | 18 | 34 | 6 | 22 | 38 | 10 | 26 | 42 | 14 | 30 | 46 | +----+----+----+----+----+----+----+----+----+----+----+----+ 3 | 3 | 19 | 35 | 7 | 23 | 39 | 11 | 27 | 43 | 15 | 31 | 47 | +----+----+----+----+----+----+----+----+----+----+----+----+ 4 | 4 | 20 | 36 | 8 | 24 | 40 | 12 | 28 | 44 | 16 | 32 | 48 | +----+----+----+----+----+----+----+----+----+----+----+----+ ``` """ function logical_product(layout::Layout, tile::Layout) return make_layout(layout, composition(complement(layout, size(layout) * cosize(tile)), tile)) end function logical_product(layout::Layout, tile::Colon) return layout end function logical_product(layout::Layout, tile::IntType) return logical_product(layout, make_layout(tile)) end function logical_product(layout::Layout, tile::Tuple) return transform_layout(logical_product, layout, tile) end function zipped_product(layout::Layout, tile) return tile_unzip(logical_product(layout, tile), tile) end function tiled_product(layout::Layout, tile) d = zipped_product(layout, tile) return d(:, repeat(:, rank(tile))) end """ blocked_product(tile::Layout, matrix_of_tiles::Layout, coalesce_result::Bool=false) Compute the blocked product of two layouts. Indexing through the first mode of the resulting layout corresponds to indexing through the cartesian product of the first mode of `tile` and the first mode of `matrix_of_tiles`. Indexing through the second mode is similar. If `coalesce_result` is true, then the result is coalesced. ```julia julia> tile = @Layout (2, 2); julia> matrix_of_tiles = @Layout (3, 4) (4, 1); julia> print_layout(blocked_product(tile, matrix_of_tiles)) ((_2, _3), (_2, _4)):((_1, _16), (_2, _4)) 1 2 3 4 5 6 7 8 +----+----+----+----+----+----+----+----+ 1 | 1 | 3 | 5 | 7 | 9 | 11 | 13 | 15 | +----+----+----+----+----+----+----+----+ 2 | 2 | 4 | 6 | 8 | 10 | 12 | 14 | 16 | +----+----+----+----+----+----+----+----+ 3 | 17 | 19 | 21 | 23 | 25 | 27 | 29 | 31 | +----+----+----+----+----+----+----+----+ 4 | 18 | 20 | 22 | 24 | 26 | 28 | 30 | 32 | +----+----+----+----+----+----+----+----+ 5 | 33 | 35 | 37 | 39 | 41 | 43 | 45 | 47 | +----+----+----+----+----+----+----+----+ 6 | 34 | 36 | 38 | 40 | 42 | 44 | 46 | 48 | +----+----+----+----+----+----+----+----+ ``` """ function blocked_product(block::Layout{N}, layout::Layout{M}, coalesce_result::Bool=false) where {N, M} R = max(N, M) padded_block = append(block, StaticInt{R}()) padded_layout = append(layout, StaticInt{R}()) result = logical_product(padded_block, padded_layout) @inbounds result = _zip(result[1], result[2]) coalesce_result && return coalesce(result, repeat(One(), R)) return result end """ raked_product(tile::Layout, matrix_of_tiles::Layout, coalesce_result::Bool=false) The tile is shattered or interleaved with the matrix of tiles. 
```julia julia> tile = @Layout (2, 2) (1, 2); julia> matrix_of_tiles = @Layout (3, 4) (4, 1); julia> print_layout(raked_product(tile, matrix_of_tiles)) ((_3, _2), (_4, _2)):((_16, _1), (_4, _2)) 1 2 3 4 5 6 7 8 +----+----+----+----+----+----+----+----+ 1 | 1 | 5 | 9 | 13 | 3 | 7 | 11 | 15 | +----+----+----+----+----+----+----+----+ 2 | 17 | 21 | 25 | 29 | 19 | 23 | 27 | 31 | +----+----+----+----+----+----+----+----+ 3 | 33 | 37 | 41 | 45 | 35 | 39 | 43 | 47 | +----+----+----+----+----+----+----+----+ 4 | 2 | 6 | 10 | 14 | 4 | 8 | 12 | 16 | +----+----+----+----+----+----+----+----+ 5 | 18 | 22 | 26 | 30 | 20 | 24 | 28 | 32 | +----+----+----+----+----+----+----+----+ 6 | 34 | 38 | 42 | 46 | 36 | 40 | 44 | 48 | +----+----+----+----+----+----+----+----+ ``` """ function raked_product(block::Layout{N}, layout::Layout{M}, coalesce_result::Bool=false) where {N, M} R = max(N, M) padded_block = append(block, StaticInt{R}()) padded_layout = append(layout, StaticInt{R}()) result = logical_product(padded_block, padded_layout) @inbounds result = _zip(result[2], result[1]) coalesce_result && return coalesce(result, repeat(One(), R)) return result end function tile_to_shape(l::Layout{N}, trg_shape::IntTuple{R}, ord_shape=GenColMajor) where {N, R} @assert N <= R "Cannot tile to a smaller shape" padded_layout = append(l, StaticInt{R}()) product_shape = shape_div(product_each(trg_shape), product_each(shape(padded_layout))) return coalesce(blocked_product(padded_layout, make_ordered_layout(product_shape, ord_shape)), product_shape) end @generated function safe_div(::StaticInt{N}, ::StaticInt{M}) where {N, M} R = div(N, M) @assert R * M==N "Safe division failed" return :($(StaticInt{R}())) end @inline safe_div(x::IntType, y::IntType) = div(x, y) struct Upcast{N<:StaticInt} end function (::Upcast)(shape::IntType, stride::StaticInt{0}) @inline return make_layout(shape, stride) end function (::Upcast{m})(shape::IntType, stride::StaticInt) where m @inline return make_layout(shape_div(shape, shape_div(m(), abs(stride))), shape_div(stride, m())) end function (::Upcast{m})(shape::IntType, stride::DInt) where m @inline return make_layout(shape, safe_div(stride, m())) end function (f::Upcast{m})(shape::Tuple, stride::Tuple) where m return transform_layout(f, shape, stride) end function upcast(layout::Layout, ::StaticInt{M}) where M @inline return Upcast{StaticInt{M}}()(layout.shape, layout.stride) end struct Downcast{N<:StaticInt} end function (::Downcast{N})(shape::IntType, stride::StaticInt{1}) where N @inline return make_layout(shape * N(), stride) end function (::Downcast{N})(shape::IntType, stride::StaticInt{-1}) where N @inline return make_layout(shape * N(), stride) end function (::Downcast{N})(shape::IntType, stride::StaticInt) where N @inline return make_layout(shape, stride * N()) end function (::Downcast{N})(shape::DInt, stride::DInt) where N @inline if isone(stride) return make_layout(shape * N(), stride) else return make_layout(shape, stride * N()) end end function (f::Downcast{N})(shape::Tuple, stride::Tuple) where N @inline return transform_layout(f, shape, stride) end function downcast(layout::Layout, ::StaticInt{M}) where M @inline return Downcast{StaticInt{M}}()(layout.shape, layout.stride) end @generated function recast(layout::Layout, ::Type{NewType}, ::Type{OldType}) where {NewType, OldType} if sizeof(NewType) == sizeof(OldType) return :layout elseif sizeof(NewType) > sizeof(OldType) @assert sizeof(NewType) % sizeof(OldType)==0 "Cannot recast $OldType to $NewType" return :(upcast(layout, 
$(StaticInt{sizeof(NewType) ÷ sizeof(OldType)}()))) else @assert sizeof(OldType) % sizeof(NewType)==0 "Cannot recast $OldType to $NewType" return :(downcast(layout, $(StaticInt{sizeof(OldType) ÷ sizeof(NewType)}()))) end end """ logical_divide(layout::Layout, tile::Tile) Gather the elements of `layout` along all modes into blocks according to `tile`. ```julia julia> raked_prod = @Layout ((3, 2), (4, 2)) ((16, 1), (4, 2)); julia> print_layout(raked_prod) ((_3, _2), (_4, _2)):((_16, _1), (_4, _2)) 1 2 3 4 5 6 7 8 +----+----+----+----+----+----+----+----+ 1 | 1 | 5 | 9 | 13 | 3 | 7 | 11 | 15 | +----+----+----+----+----+----+----+----+ 2 | 17 | 21 | 25 | 29 | 19 | 23 | 27 | 31 | +----+----+----+----+----+----+----+----+ 3 | 33 | 37 | 41 | 45 | 35 | 39 | 43 | 47 | +----+----+----+----+----+----+----+----+ 4 | 2 | 6 | 10 | 14 | 4 | 8 | 12 | 16 | +----+----+----+----+----+----+----+----+ 5 | 18 | 22 | 26 | 30 | 20 | 24 | 28 | 32 | +----+----+----+----+----+----+----+----+ 6 | 34 | 38 | 42 | 46 | 36 | 40 | 44 | 48 | +----+----+----+----+----+----+----+----+ julia> subtile = (Layout(2, 3), Layout(2, 4)); # gather 2 elements with stride 3 along the first mode # and 2 elements with stride 4 along the second mode julia> print_layout(logical_divide(raked_prod, subtile)) (((1, 2), ((3, 1), (1, 1))), ((1, 2), ((4, 1), (1, 1)))):(((48, 1), ((_16, _1), (48, 2))), ((16, 2), ((_4, _2), (16, 4)))) 1 2 3 4 5 6 7 8 +----+----+----+----+----+----+----+----+ 1 | 1 | 3 | 5 | 7 | 9 | 11 | 13 | 15 | +----+----+----+----+----+----+----+----+ 2 | 2 | 4 | 6 | 8 | 10 | 12 | 14 | 16 | +----+----+----+----+----+----+----+----+ 3 | 17 | 19 | 21 | 23 | 25 | 27 | 29 | 31 | +----+----+----+----+----+----+----+----+ 4 | 18 | 20 | 22 | 24 | 26 | 28 | 30 | 32 | +----+----+----+----+----+----+----+----+ 5 | 33 | 35 | 37 | 39 | 41 | 43 | 45 | 47 | +----+----+----+----+----+----+----+----+ 6 | 34 | 36 | 38 | 40 | 42 | 44 | 46 | 48 | +----+----+----+----+----+----+----+----+ ``` """ function logical_divide(layout::Layout, tile::Layout) return composition(layout, make_layout(tile, complement(tile, size(layout)))) end function logical_divide(layout::Layout, tile::Tuple) length(tile) <= rank(layout) || throw(DimensionMismatch("too many modes in tile")) return transform_layout(logical_divide, layout, tile) end function logical_divide(layout::Layout, tile::Colon) return layout end function logical_divide(layout::Layout, tile::IntType) return logical_divide(layout, make_layout(tile)) end """ zipped_divide(layout::Layout, tile) Compute the logical division of `layout` by `tile`, then group the resulting subtiles into the first mode and the rest into the second mode. 
```julia julia> raked_prod = @Layout ((3, 2), (4, 2)) ((16, 1), (4, 2)); julia> print_layout(raked_prod) ((_3, _2), (_4, _2)):((_16, _1), (_4, _2)) 1 2 3 4 5 6 7 8 +----+----+----+----+----+----+----+----+ 1 | 1 | 5 | 9 | 13 | 3 | 7 | 11 | 15 | +----+----+----+----+----+----+----+----+ 2 | 17 | 21 | 25 | 29 | 19 | 23 | 27 | 31 | +----+----+----+----+----+----+----+----+ 3 | 33 | 37 | 41 | 45 | 35 | 39 | 43 | 47 | +----+----+----+----+----+----+----+----+ 4 | 2 | 6 | 10 | 14 | 4 | 8 | 12 | 16 | +----+----+----+----+----+----+----+----+ 5 | 18 | 22 | 26 | 30 | 20 | 24 | 28 | 32 | +----+----+----+----+----+----+----+----+ 6 | 34 | 38 | 42 | 46 | 36 | 40 | 44 | 48 | +----+----+----+----+----+----+----+----+ julia> subtile = (@Layout(2, 3), @Layout(2, 4)); # gather 2 elements with stride 3 along the first mode and 2 elements with stride 4 along the second mode julia> print_layout(zipped_divide(raked_prod, subtile)) ((_2, _2), (_3, _4)):((_1, _2), (_16, _4)) 1 2 3 4 5 6 7 8 9 10 11 12 +----+----+----+----+----+----+----+----+----+----+----+----+ 1 | 1 | 17 | 33 | 5 | 21 | 37 | 9 | 25 | 41 | 13 | 29 | 45 | +----+----+----+----+----+----+----+----+----+----+----+----+ 2 | 2 | 18 | 34 | 6 | 22 | 38 | 10 | 26 | 42 | 14 | 30 | 46 | +----+----+----+----+----+----+----+----+----+----+----+----+ 3 | 3 | 19 | 35 | 7 | 23 | 39 | 11 | 27 | 43 | 15 | 31 | 47 | +----+----+----+----+----+----+----+----+----+----+----+----+ 4 | 4 | 20 | 36 | 8 | 24 | 40 | 12 | 28 | 44 | 16 | 32 | 48 | +----+----+----+----+----+----+----+----+----+----+----+----+ ``` """ function zipped_divide(layout::Layout, tile) return tile_unzip(logical_divide(layout, tile), tile) end """ tiled_divide(layout::Layout, tile) Similar to `zipped_divide`, but upack the second mode into multiple modes. """ function tiled_divide(layout::Layout, tile) d = zipped_divide(layout, tile) R = rank(d, 2) return d(:, repeat(:, R)) end function tile(l1::Layout, l2::Layout) return tiled_divide(l1, l2) end function tile(l1::Layout, l2::Layout, l3::Layout...) return tiled_divide(l1, (l2, l3...)) end """ make_fragment_like(::Layout) -> Layout make_fragment_like(T, ::MoYeArray) -> MoYeArray Make a compact layout of the same shape with the first mode being col-major, and with the rest following the given order. """ make_fragment_like(layout::StaticLayout{1}) = make_layout(shape(layout)) function make_fragment_like(layout::StaticLayout{R}) where {R} return tiled_product(make_layout(shape(layout)[1]), make_ordered_layout(make_layout(layout[2:end]...))) end make_fragment_like(layout::Layout) = make_layout(shape(layout)) make_fragment_like(shape::GenIntTuple) = make_layout(shape)
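The product and divide operations above are inverses of a sort: `zipped_divide` applied to the `raked_product` of a tile and a matrix of tiles recovers the `(tile, rest-of-tiles)` view shown in the `logical_product` example. A small sketch combining the docstring examples above (assuming these functions are exported; otherwise qualify them with `MoYe.`):

```julia
using MoYe

tile            = @Layout((2, 2), (1, 2))
matrix_of_tiles = @Layout((3, 4), (4, 1))

# Interleave the tile with the matrix of tiles...
raked = raked_product(tile, matrix_of_tiles)

# ...then gather the tiles back out. The result should match
# logical_product(tile, matrix_of_tiles): ((_2, _2), (_3, _4)):((_1, _2), (_16, _4))
regrouped = zipped_divide(raked, (@Layout(2, 3), @Layout(2, 4)))

print_layout(raked)
print_layout(regrouped)
```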
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
748
@inline isgmem(::MoYeArray{T, N, ViewEngine{T, LLVMPtr{M, AS.Global}}}) where {T, N, M} = true @inline isgmem(::MoYeArray) = false @inline issmem(::MoYeArray{T, N, ViewEngine{T, LLVMPtr{M, AS.Shared}}}) where {T, N, M} = true @inline issmem(::MoYeArray) = false @inline isrmem(x::MoYeArray) = !isgmem(x) && !issmem(x) @inline recast(::Type{T}, ptr::LLVMPtr{S, AS}) where {T, S, AS} = LLVM.Interop.addrspacecast(LLVMPtr{T, AS}, ptr) @inline recast(::Type{T}, ptr::Ptr) where {T} = reinterpret(Ptr{T}, ptr) Base.:(-)(x::LLVMPtr, ::StaticInt{N}) where {N} = x - N Base.:(-)(::StaticInt{N}, y::LLVMPtr) where {N} = y - N Base.:(+)(x::LLVMPtr, ::StaticInt{N}) where {N} = x + N Base.:(+)(::StaticInt{N}, y::LLVMPtr) where {N} = y + N
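These predicates are purely type-level: they inspect the address space of the pointer wrapped by the array's `ViewEngine`, so a `MoYeArray` built from an ordinary host `Ptr` is classified as register/local memory. A quick host-side check (a sketch; `isgmem`/`issmem`/`isrmem` may need to be qualified with `MoYe.` if they are not exported):

```julia
using MoYe

x = [1, 2, 3, 4]
a = MoYeArray(pointer(x), @Layout((2, 2)))   # host Ptr: not an LLVMPtr in the Global or Shared space

isgmem(a), issmem(a), isrmem(a)              # expected: (false, false, true)
```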
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
10939
function Base.show(io::IO, l::Layout) return print(io, shape(l), ":", stride(l)) end @inline Base.ndigits(@nospecialize x::StaticInt{N}) where {N} = ndigits(N) function print_layout(layout::Layout{2}) idx_width = ndigits(cosize(layout)) + 2 delim = "+-----------------" print(layout) print("\n") # Column indices print(" ") for n in 1:size(layout, 2) formatted_number = lpad(n, idx_width - 2) print(" ", formatted_number, " ") end println() # Print out A m-by-n for m in 1:size(layout, 1) # Header print(" ") for n in 1:size(layout, 2) print(view(delim, 1:(idx_width + 1))) end println("+") # Values print(lpad(m, 2), " ") # Row indices for n in 1:size(layout, 2) formatted_number = lpad(Int(layout(m, n)), idx_width - 2) print("| ", formatted_number, " ") end println("|") end # Footer print(" ") for n in 1:size(layout, 2) print(view(delim, 1:(idx_width + 1))) end return println("+") end function print_typst_mma( A::Layout, TA::Layout, B::Layout, TB::Layout, C::Layout, TC::Layout ) @assert size(A, 1) == size(C, 1) @assert size(B, 1) == size(C, 2) @assert size(A, 2) == size(B, 2) sA = known(size(TA)) sB = known(size(TB)) sC = known(size(TC)) height = (size(A, 1) + size(A, 2) + 2 + 2) * 2.35 width = (size(B, 1) + size(B, 2) + 3) * 2.35 # build table table_C_cells = Vector{String}() for m in 1:size(C, 1) push!(table_C_cells, "fill_cell(1),") for n in 1:size(C, 2) val_idx, thr_id = fldmod1(C(m, n), sC) thr_idx = TC(thr_id) push!(table_C_cells, "cell($thr_idx, $val_idx),") end push!(table_C_cells, "\n") end table_A_cells = Vector{String}() for m in 1:size(A, 1) push!(table_A_cells, "left_cell($m),") for k in 1:size(A, 2) val_idx, thr_id = fldmod1(A(m, k), sA) thr_idx = TA(thr_id) push!(table_A_cells, "cell($thr_idx, $val_idx),") end push!(table_A_cells, "\n") end table_B_cells = Vector{String}() for k in 1:size(B, 2) push!(table_B_cells, "left_cell($k),") for n in 1:size(B, 1) val_idx, thr_id = fldmod1(B(n, k), sB) thr_idx = TB(thr_id) push!(table_B_cells, "cell($thr_idx, $val_idx),") end push!(table_B_cells, "\n") end println(""" #set par(leading: 0.25em) #set page(width: $(width)em, height: $(height)em, margin: 0pt) #let color_map = ( rgb(175, 175, 255), rgb(175, 255, 175), rgb(255, 255, 175), rgb(255, 175, 175), rgb(210, 210, 255), rgb(210, 255, 210), rgb(255, 210, 210), aqua, teal, red.transparentize(50%), yellow.transparentize(50%), lime.transparentize(50%), fuchsia.transparentize(50%), color.mix((aqua, 10%), (red, 50%)).transparentize(10%), orange.transparentize(30%), purple.transparentize(50%), maroon.transparentize(70%), rgb(87, 127, 230).transparentize(30%), cmyk(27%, 0%, 3%, 5%), color.hsl(30deg, 50%, 60%).transparentize(30%), color.mix((red, 70%), (blue, 30%)).transparentize(30%), color.mix((red, 70%), (fuchsia, 30%)).transparentize(30%), color.mix((aqua, 70%), (fuchsia, 30%)).transparentize(10%), color.mix((purple, 70%), (blue, 30%)).transparentize(50%), color.mix((red, 30%), (fuchsia, 70%)).transparentize(50%), color.mix((eastern, 30%), (fuchsia, 70%)).transparentize(30%), color.mix((green, 50%), (olive, 50%)).transparentize(30%), color.mix((blue, 50%), (purple, 50%)).transparentize(30%), color.mix((yellow, 50%), (purple, 50%)).transparentize(30%), color.mix((orange, 50%), (purple, 50%)).transparentize(30%), color.mix((teal, 70%), (blue, 30%)).transparentize(30%), color.mix((aqua, 60%), (lime, 40%)).transparentize(30%), ) #let cell(thr_idx, val_idx) = table.cell(fill: color_map.at((thr_idx.bit-and(31))))[#block(width: 1.5em, height: 1.5em)[T#(thr_idx) \\ V#(val_idx)]] #let top_cell(i) = 
table.cell(stroke:none, align: horizon)[#(if i == 0 {block(width: 1.5em, height: 1.5em)[]} else {block(width: 1.5em, height: 1.5em)[#i]})] #let left_cell(i) = table.cell(x: 0, y:i, stroke:none, align: horizon)[#block(width: 1.5em, height: 1.5em)[#i]] #let fill_cell(i) = table.cell(stroke:none)[#block(width: 1.5em, height: 1.5em)[]] """) println(""" #let table_C = table( rows: $(known(size(C, 1))), columns: $(known(size(C, 2)+One())), align: center, table.header(..range($(known(size(C, 2)+One()))).map(fill_cell)), $(table_C_cells...)) """) println(""" #let table_A = table( columns: $(known(size(A, 2)+One())), rows: $(known(size(A, 1))), align: center, table.header(..range($(known(size(A, 2)+One()))).map(top_cell)), $(table_A_cells...)) """) println(""" #let table_B = table( columns: $(known(size(B, 1)+One())), rows: $(known(size(B, 2))), align: center, table.header(..range($(known(size(B, 1)+One()))).map(top_cell)), $(table_B_cells...)) """) print(""" #grid( columns: 2, rows: 2, gutter: 0pt, [], table_B, table_A, table_C )""") end function print_typst_copy(S, TS, D, TD) @assert rank(S) == 2 @assert rank(D) == 2 @assert size(S, 1) == size(D, 1) @assert size(S, 2) == size(D, 2) height = (size(S, 1) + 2) * 2.35 width = (size(S, 2) * 2 + 4) * 2.35 println(""" #set par(leading: 0.25em) #set page(width: $(width)em, height: $(height)em, margin: 0pt) #let color_map = ( rgb(175, 175, 255), rgb(175, 255, 175), rgb(255, 255, 175), rgb(255, 175, 175), rgb(210, 210, 255), rgb(210, 255, 210), rgb(255, 210, 210), aqua, teal, red.transparentize(50%), yellow.transparentize(50%), lime.transparentize(50%), fuchsia.transparentize(50%), color.mix((aqua, 10%), (red, 50%)).transparentize(10%), orange.transparentize(30%), purple.transparentize(50%), maroon.transparentize(70%), rgb(87, 127, 230).transparentize(30%), cmyk(27%, 0%, 3%, 5%), color.hsl(30deg, 50%, 60%).transparentize(30%), color.mix((red, 70%), (blue, 30%)).transparentize(30%), color.mix((red, 70%), (fuchsia, 30%)).transparentize(30%), color.mix((aqua, 70%), (fuchsia, 30%)).transparentize(10%), color.mix((purple, 70%), (blue, 30%)).transparentize(50%), color.mix((red, 30%), (fuchsia, 70%)).transparentize(50%), color.mix((eastern, 30%), (fuchsia, 70%)).transparentize(30%), color.mix((green, 50%), (olive, 50%)).transparentize(30%), color.mix((blue, 50%), (purple, 50%)).transparentize(30%), color.mix((yellow, 50%), (purple, 50%)).transparentize(30%), color.mix((orange, 50%), (purple, 50%)).transparentize(30%), color.mix((teal, 70%), (blue, 30%)).transparentize(30%), color.mix((aqua, 60%), (lime, 40%)).transparentize(30%), ) #let cell(thr_idx, val_idx) = table.cell(fill: color_map.at((thr_idx.bit-and(31))))[#block(width: 1.5em, height: 1.5em)[T#(thr_idx) \\ V#(val_idx)]] #let top_cell(i) = table.cell(stroke:none, align: horizon)[#(if i == 0 {block(width: 1.5em, height: 1.5em)[]} else {block(width: 1.5em, height: 1.5em)[#i]})] #let left_cell(i) = table.cell(x: 0, y:i, stroke:none, align: horizon)[#block(width: 1.5em, height: 1.5em)[#i]] #let fill_cell(i) = table.cell(stroke:none)[#block(width: 1.5em, height: 1.5em)[]] """) sS = known(size(TS)) sD = known(size(TD)) table_S_cells = Vector{String}() for m in 1:size(S, 1) push!(table_S_cells, "left_cell($m),") for n in 1:size(S, 2) val_idx, thr_id = fldmod1(S(m, n), sS) thr_idx = TS(thr_id) push!(table_S_cells, "cell($thr_idx, $val_idx),") end push!(table_S_cells, "\n") end table_D_cells = Vector{String}() for m in 1:size(D, 1) push!(table_D_cells, "left_cell($m),") for n in 1:size(D, 2) val_idx, thr_id = 
fldmod1(D(m, n), sD) thr_idx = TD(thr_id) push!(table_D_cells, "cell($thr_idx, $val_idx),") end push!(table_D_cells, "\n") end println(""" #let table_S = table( columns: $(known(size(S, 2)+One())), rows: $(known(size(S, 1))), align: center, table.header(..range($(known(size(S, 2)+One()))).map(top_cell)), $(table_S_cells...)) """) println(""" #let table_D = table( columns: $(known(size(D, 2)+One())), rows: $(known(size(D, 1))), align: center, table.header(..range($(known(size(D, 2)+One()))).map(top_cell)), $(table_D_cells...)) """) # put the two tables side by side print(""" #grid( columns: 2, rows: 1, gutter: 3em, table_S, table_D )""") end function print_typst end print_typst(m::MMAAtom) = print_typst(make_tiled_mma(m)) print_typst(c::CopyAtom) = print_typst(make_tiled_copy(c)) """ print_typst(::AbstractMMAAtom) Print the layout of the A, B, and C matrices in a typst format. Go to https://typst.app and paste the output to visualize the layout. ## Example ``` julia> tiled_mma = make_tiled_mma(MMAOP_8x8x4_F32F16F16F32_NT(), @Layout((2,2), (2,1)), (@Layout((4,4,2), (1,8,4)), _32, _4)) TiledMMA ThrLayoutVMNK: ((_4, _2), _2, _2, _1):((_1, _16), _8, _4, _0) PermutationMNK: ((_4, _4, _2):(_1, _8, _4), _32, _4) MMAAtom Thread ID: (_4, _2):(_1, _16) Layout_A_TV: ((_4, _2), _4):((_8, _4), _1) Layout_B_TV: ((_4, _2), _4):((_8, _4), _1) Layout_C_TV: ((_2, _2, _2), (_2, _2, _2)):((_1, _16, _4), (_8, _2, _32)) julia> print_typst(tiled_mma) ``` It will print the following image: ![](../assets/tiled_mma.png) """ function print_typst(tiled_mma::TiledMMA) A, TA = get_layoutA_MK(tiled_mma) B, TB = get_layoutB_NK(tiled_mma) C, TC = get_layoutC_MN(tiled_mma) print_typst_mma(A, TA, B, TB, C, TC) end """ print_typst(::AbstractCopyAtom) Print the layout of the source and destination matrices in a typst format. ## Example ```julia julia> tiled_copy = make_tiled_copy(CopyAtom{UniversalCopy{UInt128}, Float32}(), @Layout((32,8)), @Layout((4,1))) julia> print_typst(tiled_copy) ``` """ function print_typst(tiled_copy::TiledCopy) layoutS_MN, thrID_S = get_layoutS_MN(tiled_copy) layoutD_MN, thrID_D = get_layoutD_MN(tiled_copy) print_typst_copy(layoutS_MN, thrID_S, layoutD_MN, thrID_D) end
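`print_layout` only accepts rank-2 layouts and renders each cell as `layout(m, n)`, which makes it a convenient way to compare stride conventions. A small sketch (the expected first rows follow from the usual coordinate-times-stride rule):

```julia
using MoYe

col = make_layout((3, 4), MoYe.GenColMajor)   # compact column-major strides
row = make_layout((3, 4), MoYe.GenRowMajor)   # compact row-major strides

print_layout(col)   # first row should read  1  4  7 10
print_layout(row)   # first row should read  1  2  3  4
```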
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
10651
Base.@assume_effects :terminates_locally @generated function coord_to_index0_itt(coord::IntType, shape::Tuple, stride::Tuple) N = length(shape.parameters) if N == 1 if shape.parameters[1] <: Tuple return :(coord_to_index0_itt(coord, shape[1], stride[1])) else return :(coord * stride[1]) end elseif coord == StaticInt{0} expr = Expr(:call, :+,) for i in 1:N if shape.parameters[i] <: Tuple push!(expr.args, :(coord_to_index0_itt(Zero(), shape[$i], stride[$i]))) else push!(expr.args, :(Zero() * stride[$i])) end end return expr else expr = Expr(:block, :(result = Zero()), :(new_coord = coord)) for i in 1:N if shape.parameters[i] <: Tuple push!(expr.args, :(result += coord_to_index0_itt(new_coord % product(shape[$i]), shape[$i], stride[$i]))) else push!(expr.args, :(result += (new_coord % shape[$i]) * stride[$i])) end push!(expr.args, :(new_coord = new_coord ÷ product(shape[$i]))) end push!(expr.args, :(return result)) return expr end end Base.@assume_effects :total function coord_to_index0(coord::IntType, shape::IntType, stride::IntType) @inline return coord * stride end Base.@assume_effects :total function coord_to_index0(coord::IntType, shape::IntTuple{N}, stride::IntTuple{N}) where {N} @inline return coord_to_index0_itt(coord, shape, stride) end @generated function coord_to_index0(coord::NTuple{N, <:StaticInt}, shape::NTuple{N, <:StaticInt}, stride::NTuple{N, <:StaticInt}) where {N} coord, stride = make_tuple(coord), make_tuple(stride) result = sum((c * s for (c, s) in zip(coord, stride))) return :($result) end Base.@assume_effects :total @generated function coord_to_index0(coord::IntTuple{N}, shape::IntTuple{N}, stride::IntTuple{N}) where {N} expr = Expr(:call, :+) for i in 1:N push!(expr.args, :(coord_to_index0(coord[$i], shape[$i], stride[$i]))) end return expr end Base.@assume_effects :total function coord_to_index0_horner(coord, shape, I1, Is...) if isempty(Is) return coord[I1] else return coord[I1] + shape[I1] * coord_to_index0_horner(coord, shape, Is...) end end Base.@assume_effects :total function coord_to_index0(coord::IntType, shape) return coord end Base.@assume_effects :total function coord_to_index0(coord::IntTuple{N}, shape::IntTuple{N}) where {N} flat_coord = flatten(coord) flat_shape = flatten(product_like(shape, coord)) coord_to_index0_horner(flat_coord, flat_shape, ntuple(identity, rank(flat_shape))...) end @inline _offset(x::Colon) = Zero() @inline _offset(x::DInt) = x - one(x) @inline _offset(x::StaticInt{N}) where {N} = StaticInt{N - 1}() @inline _offset(x::NTuple{N, Colon}) where {N} = ntuple(Returns(Zero()), Val(N)) @inline function _offset(x::NTuple{N, Int}) where {N} return ntuple(Base.Fix2(-, 1) ∘ Base.Fix1(getindex, x), Val(N)) end @inline _offset(x::Tuple) = map(_offset, x) Base.@assume_effects :total function coord_to_index(coord::IntType, shape, stride...) idx = coord_to_index0(coord - one(coord), shape, stride...) return idx + one(idx) end Base.@assume_effects :total function coord_to_index(coord, shape, stride...) idx = coord_to_index0(_offset(coord), shape, stride...) 
return idx + one(idx) end function index_to_coord(index::IntType, shape::StaticInt{1}, stride::IntType) return Zero() end function index_to_coord(index::IntType, shape::IntType, stride::IntType) crd = ((index - one(index)) ÷ stride) % shape return crd + one(crd) end function index_to_coord(index::IntType, shape::Tuple, stride::Tuple) length(shape) == length(stride) || throw(DimensionMismatch("shape and stride must have the same rank")) return let index = index map((s, d) -> index_to_coord(index, s, d), shape, stride) end end function index_to_coord(index::IntType, shape::Tuple, stride::IntType) return let index = index map((s, d) -> index_to_coord(index, s, d), shape, compact_col_major(shape, stride)) end end function index_to_coord(index::Tuple, shape::Tuple, stride::Tuple) length(index) == length(shape) == length(stride) || throw(DimensionMismatch("index, shape, and stride must have the same rank")) return map(index_to_coord, index, shape, stride) end # default stride, compact + column major function index_to_coord(index::IntType, shape::IntType) @inline return index end function index_to_coord(index::IntType, shape::Tuple) return index_to_coord(index, shape, compact_col_major(shape, One())) end function index_to_coord(index::Tuple, shape::Tuple) length(index) == length(shape) || throw(DimensionMismatch("index and shape must have the same rank")) return map(index_to_coord, index, shape) end """ Transform a coordinate in one shape to a coordinate in another shape. """ function coord_to_coord(@nospecialize(coord::Tuple), @nospecialize(src_shape::Tuple), @nospecialize(dst_shape::Tuple)) length(coord) == length(src_shape) == length(dst_shape) || throw(DimensionMismatch("coord, shape1, and shape2 must have the same rank")) return map(coord_to_coord, coord, src_shape, dst_shape) end function coord_to_coord(coord, src_shape, dst_shape) return index_to_coord(coord_to_index(coord, src_shape), dst_shape) end struct LayoutLeft end struct LayoutRight end """ GenColMajor [`make_layout`](@ref) uses this to create a col-major compact layout. ```julia julia> make_layout(((1, (2, 4)), 1), MoYe.GenColMajor) ((1, (2, 4)), 1):((_1, (1, 2)), 8) ``` """ const GenColMajor = LayoutLeft """ GenRowMajor [`make_layout`](@ref) uses this to create a row-major compact layout.
```julia julia> make_layout(((1, (2, 4)), 1), MoYe.GenRowMajor) ((1, (2, 4)), 1):((8, (4, 1)), _1) ``` """ const GenRowMajor = LayoutRight struct CompactLambda{Major} end function compact_inner_left(init, shape) for si in shape.parameters current = init[2] result = if si == One (Zero, current) elseif si <: StaticInt (current, si * current) else # if si <: Tuple compact_inner_left((Tuple{}, current), si) end @inbounds init = (append(init[1], result[1]), result[2]) end return init end @generated function compact(shape::StaticIntTuple, current::StaticInt, ::Type{LayoutLeft}) return :($(map(make_tuple, compact_inner_left((Tuple{}, current), shape)))) end function compact_inner_right(init, _shape) shape = reverse(_shape) for si in shape.parameters current = init[2] result = if si == One (Zero, current) elseif si <: StaticInt (current, si * current) else # if si <: Tuple compact_inner_right((Tuple{}, current), si) end @inbounds init = (prepend(init[1], result[1]), result[2]) end return init end @generated function compact(shape::StaticIntTuple, current::StaticInt, ::Type{LayoutRight}) return :($(map(make_tuple, compact_inner_right((Tuple{}, current), shape)))) end Base.@assume_effects :total function compact(shape::Tuple, current::IntType, ::Type{LayoutLeft}) return _foldl(CompactLambda{LayoutLeft}(), shape, ((), current)) end Base.@assume_effects :total function compact(shape::Tuple, current::IntType, ::Type{LayoutRight}) return _foldl(CompactLambda{LayoutRight}(), reverse(shape), ((), current)) end function compact(shape::StaticInt{1}, current::StaticInt, ::Type{Major}) where {Major} @inline return (Zero(), current) end function compact(shape::StaticInt{1}, current::Integer, ::Type{Major}) where {Major} @inline return (Zero(), current) end @generated function compact(shape::StaticInt{N}, current::StaticInt{M}, ::Type{Major}) where {Major, N, M} return :((current, $(StaticInt{N * M}()))) end function compact(shape::IntType, current::IntType, ::Type{Major}) where {Major} @inline return (current, current * shape) end function compact_major(shape::Tuple, current::Tuple, ::Type{Major}) where {Major} length(shape) == length(current) || throw(DimensionMismatch("shape and current must have the same rank")) return map((s, c) -> compact_major(s, c, Major), shape, current) end function compact_major(shape, current::IntType, major::Type{Major}) where {Major} return @inbounds first(compact(shape, current, major)) end Base.@assume_effects :total function (::CompactLambda{LayoutLeft})(init, si) result = compact(si, init[2], LayoutLeft) return @inbounds (append(init[1], result[1]), result[2]) end Base.@assume_effects :total function (::CompactLambda{LayoutRight})(init, si) result = compact(si, init[2], LayoutRight) return @inbounds (prepend(init[1], result[1]), result[2]) end compact_col_major(shape, current=One()) = compact_major(shape, current, LayoutLeft) compact_row_major(shape, current=One()) = compact_major(shape, current, LayoutRight) function compact_order(shape::Tuple, order::Tuple, old_shape, old_order) return let old_shape = old_shape, old_order = old_order map((x, y) -> compact_order(x, y, old_shape, old_order), shape, order) end end function compact_order(shape, order::StaticInt, old_shape, old_order) d = let order = order product(map((s, o) -> ifelse(o < order, product(s), One()), old_shape, old_order)) end return compact_col_major(shape, d) end function compact_order(shape, order) old_shape = flatten_to_tuple(product_like(shape, order)) flat_order = flatten_to_tuple(order) max_order = 
_foldl(flat_order, Zero()) do v, o ifelse(Static.le(v, o) isa Static.True, o, v) end old_order = map(ntuple(i->static(i+max_order), Val(rank(flat_order))), flat_order) do seq_v, o ifelse(o isa StaticInt, o, seq_v) end new_order = unflatten(old_order, order) return compact_order(shape, new_order, old_shape, old_order) end function compact_order(shape, ::Type{LayoutLeft}) return compact_col_major(shape) end function compact_order(shape, ::Type{LayoutRight}) return compact_row_major(shape) end
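When no stride is supplied, `coord_to_index` and `index_to_coord` agree on a compact column-major linearisation, so they round-trip. A short sanity check (these helpers are internal, hence the `MoYe.` qualification; the expected values follow from the coordinate-times-stride rule):

```julia
using MoYe

shape = (3, 4)

MoYe.compact_col_major(shape)            # strides equivalent to (1, 3)
MoYe.compact_row_major(shape)            # strides equivalent to (4, 1)

i = MoYe.coord_to_index((2, 3), shape)   # (2-1)*1 + (3-1)*3 + 1 == 8
MoYe.index_to_coord(i, shape)            # back to (2, 3)
```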
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
1938
const IntType = Union{Int, StaticInt, Int32} const DInt = Union{Int, Int32} function Base.promote_rule(@nospecialize(T1::Type{<:StaticInt}), T2::Type{Int32}) return Int32 end Base.rem(x::Int32, ::StaticInt{Y}) where {Y} = rem(x, Int32(Y)) Base.rem(::StaticInt{X}, y::Int32) where {X} = rem(Int32(X), y) Base.div(x::Int32, ::StaticInt{Y}) where {Y} = div(x, Int32(Y)) Base.div(::StaticInt{X}, y::Int32) where {X} = div(Int32(X), y) uint_bit(::StaticInt{8}) = UInt8 uint_bit(::StaticInt{16}) = UInt16 uint_bit(::StaticInt{32}) = UInt32 uint_bit(::StaticInt{64}) = UInt64 uint_bit(::StaticInt{128}) = UInt128 uint_bytes(::StaticInt{N}) where {N} = uint_bit(static(8*N)) @generated sizeof_bits(::Type{T}) where {T} = :($(static(sizeof(T)*8))) @inline Base.:(==)(::StaticInt{N}, ::StaticInt{N}) where {N} = true @inline Base.:(==)(@nospecialize(x::StaticInt), @nospecialize(y::StaticInt)) = false @inline Base.:(*)(::Type{StaticInt{N}}, ::Type{StaticInt{M}}) where {N, M} = StaticInt{N*M} function Base.show(io::IO, ::MIME"text/plain", @nospecialize(x::StaticInt)) print(io, "_" * repr(known(typeof(x)))) nothing end @generated function Base.abs(::StaticInt{N}) where {N} return quote Base.@_inline_meta return $(StaticInt{abs(N)}()) end end const _0 = StaticInt{0}() const _1 = StaticInt{1}() const _2 = StaticInt{2}() const _3 = StaticInt{3}() const _4 = StaticInt{4}() const _5 = StaticInt{5}() const _6 = StaticInt{6}() const _7 = StaticInt{7}() const _8 = StaticInt{8}() const _9 = StaticInt{9}() const _10 = StaticInt{10}() const _16 = StaticInt{16}() const _32 = StaticInt{32}() const _64 = StaticInt{64}() const _128 = StaticInt{128}() const _256 = StaticInt{256}() const _512 = StaticInt{512}() const _1024 = StaticInt{1024}() const _2048 = StaticInt{2048}() const _4096 = StaticInt{4096}() const _8192 = StaticInt{8192}()
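`uint_bit`/`uint_bytes` map a bit or byte width to the corresponding unsigned integer type, and the `_N` constants are simply named `StaticInt`s; both are mostly used when picking a vector width for copies. A few illustrative calls (internal helpers, so they are qualified with `MoYe.` here; `_32` and `_16` are the exported static constants defined above):

```julia
using MoYe

MoYe.uint_bit(_32)         # UInt32: the unsigned type that is 32 bits wide
MoYe.uint_bytes(_16)       # UInt128: 16 bytes == 128 bits
MoYe.sizeof_bits(Float16)  # _16, i.e. StaticInt{16}()
```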
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
7450
@inline function make_fragment_like(::Type{T}, layout::Layout) where {T} return MoYeArray{T}(undef, make_fragment_like(layout)) end @inline function make_fragment_like(::Type{T}, x::MoYeArray) where {T} return make_fragment_like(T, layout(x)) end @inline function make_fragment_like(x::MoYeArray{T}) where {T} return make_fragment_like(T, x) end # make_identity_tensor # Layout manipulation, should return a non-owning MoYeArray @inline flatten(@nospecialize(x::MoYeArray)) = MoYeArray(pointer(x), flatten(layout(x))) @inline Base.coalesce(@nospecialize x::MoYeArray) = MoYeArray(pointer(x), coalesce(layout(x))) @inline function Base.coalesce(@nospecialize(x::MoYeArray), @nospecialize(trg_profile::IntTuple)) return MoYeArray(pointer(x), coalesce(layout(x), trg_profile)) end @inline function group_modes(@nospecialize(x::MoYeArray), B::IntType, E::IntType) return MoYeArray(pointer(x), group(layout(x), B, E)) end # Algebra @inline function logical_divide(@nospecialize(x::MoYeArray), tile) return MoYeArray(pointer(x), logical_divide(layout(x), tile)) end @inline function zipped_divide(@nospecialize(x::MoYeArray), tile) return MoYeArray(pointer(x), zipped_divide(layout(x), tile)) end @inline function tiled_divide(@nospecialize(x::MoYeArray), tile) return MoYeArray(pointer(x), tiled_divide(layout(x), tile)) end @inline function local_partition(x::MoYeArray{T,N}, tile::Tile, coord::Tuple) where {T,N} return view(zipped_divide(x, tile), coord, ntuple(i -> Colon(), Val(N))) end @inline function local_partition(@nospecialize(x::MoYeArray), tile::Layout, index::DInt) return local_partition(x, map(capacity, shape(tile)), get_congr_coord(tile, index)) end @inline function local_partition(@nospecialize(x::MoYeArray), tile::Tile, coord, proj) return local_partition(x, dice(tile, proj), dice(coord, proj)) end @inline function local_partition(@nospecialize(x::MoYeArray), tile::Layout, index::Integer, proj) return local_partition(x, dice(map(capacity, shape(tile)), proj), get_congr_coord(dice(tile, proj), index)) end function composition(x::MoYeArray, l) @inline return MoYeArray(pointer(x), composition(layout(x), l)) end function Base.:(∘)(x::MoYeArray, l) @inline return MoYeArray(pointer(x), composition(layout(x), l)) end """ @parallelize x::MoYeArray threadgroup_layout::Layout thread_idx::Int Partition `x` with `size(threadgroup_layout)` threads, and return the view of the entries that the thread at `thread_idx` will work on. ## Examples Say we have a [`MoYeArray`](@ref) `x` of shape `(6, 8)` and 4 threads of shape (2, 2). We would like to partition `x` with the 4 threads and get a view of the entries that the first thread will work on. We can do this by calling `@parallelize(x, (2, 2), 1)`. 
```julia julia> a = MoYeArray(pointer([i for i in 1:48]), @Layout((6,8))) 6×8 MoYeArray{Int64, 2, ViewEngine{Int64, Ptr{Int64}}, Layout{2, Tuple{Static.StaticInt{6}, Static.StaticInt{8}}, Tuple{Static.StaticInt{1}, Static.StaticInt{6}}}}: 1 7 13 19 25 31 37 43 2 8 14 20 26 32 38 44 3 9 15 21 27 33 39 45 4 10 16 22 28 34 40 46 5 11 17 23 29 35 41 47 6 12 18 24 30 36 42 48 julia> @parallelize a (_2, _2) (1, 1) 3×4 MoYeArray{Int64, 2, ViewEngine{Int64, Ptr{Int64}}, Layout{2, Tuple{Static.StaticInt{3}, Static.StaticInt{4}}, Tuple{Static.StaticInt{2}, Static.StaticInt{12}}}}: 1 13 25 37 3 15 27 39 5 17 29 41 ``` You can also pass in a thread layout and a thread id to get the tile: ```julia julia> @parallelize a @Layout((2,2), (1, 2)) 2 3×4 MoYeArray{Int64, 2, ViewEngine{Int64, Ptr{Int64}}, Layout{2, Tuple{StaticInt{3}, StaticInt{4}}, Tuple{StaticInt{2}, StaticInt{12}}}}: 2 14 26 38 4 16 28 40 6 18 30 42 julia> @parallelize a @Layout((2,2), (2, 1)) 2 3×4 MoYeArray{Int64, 2, ViewEngine{Int64, Ptr{Int64}}, Layout{2, Tuple{StaticInt{3}, StaticInt{4}}, Tuple{StaticInt{2}, StaticInt{12}}}}: 7 19 31 43 9 21 33 45 11 23 35 47 ``` """ macro parallelize(x, tile, coord, proj...) if length(proj) == 0 return quote local_partition($(esc(x)), static($(esc(tile))), $(esc(coord))) end else return quote local_partition($(esc(x)), static($(esc(tile))), $(esc(coord)), static($(esc(proj[1])))) end end end @inline function local_tile(x::MoYeArray, tile::Tile, coord::Tuple) R1 = length(tile) R2 = rank(x) return view(zipped_divide(x, tile), ntuple(i -> :, R1), append(coord, :, StaticInt{R2}())) end @inline function local_tile(x::MoYeArray, tile::Tile, coord::Tuple, proj) return local_tile(x, dice(tile, proj), dice(coord, proj)) end """ @tile x::MoYeArray threadgroup_shape::Tile threadgroup_coord::Tuple Partition `x` with `threadgroup_shape`. Return the view of the entries of `x` that the thread group at `threadgroup_coord` will work on. ## Examples ```julia julia> a = MoYeArray(pointer([i for i in 1:48]), @Layout((6,8))) 6×8 MoYeArray{Int64, 2, ViewEngine{Int64, Ptr{Int64}}, Layout{2, Tuple{Static.StaticInt{6}, Static.StaticInt{8}}, Tuple{Static.StaticInt{1}, Static.StaticInt{6}}}} with indices _1:_6×_1:_8: 1 7 13 19 25 31 37 43 2 8 14 20 26 32 38 44 3 9 15 21 27 33 39 45 4 10 16 22 28 34 40 46 5 11 17 23 29 35 41 47 6 12 18 24 30 36 42 48 julia> @tile a (_2, _2) (1, 1) 2×2 MoYeArray{Int64, 2, ViewEngine{Int64, Ptr{Int64}}, Layout{2, Tuple{StaticInt{2}, StaticInt{2}}, Tuple{StaticInt{1}, StaticInt{6}}}}: 1 7 2 8 ``` """ macro tile(x, tile, coord, proj...) if length(proj) == 0 return quote local_tile($(esc(x)), static($(esc(tile))), $(esc(coord))) end else return quote local_tile($(esc(x)), static($(esc(tile))), $(esc(coord)), static($(esc(proj[1])))) end end end @inline function Base.fill!(x::NonOwningArray, val) vx = engine(x) @loopinfo unroll for i in eachindex(x) @inbounds vx[i] = val end return x end @inline Base.fill!(x::OwningArray, val) = @gc_preserve fill!(x, val) @inline function Base.sum(x::NonOwningArray{T}) where T vx = engine(x) tmp = zero(T) @loopinfo unroll for i in 1:length(x) @inbounds tmp += vx[i] end return tmp end @inline Base.sum(x::OwningArray) = @gc_preserve sum(x) """ zeros!(x::MoYeArray) Fill `x` with zeros. 
""" @inline zeros!(x::MoYeArray) = fill!(x, zero(eltype(x))) function max_common_vector(src::MoYeArray{TS}, dst::MoYeArray{TD}) where {TS, TD} if sizeof(TS) == sizeof(TD) && isbitstype(TS) && isbitstype(TD) return max_common_vector(src.layout, dst.layout) else return Zero() end end @device_override function foreach(f::F, x::MoYeArray) where {F} @loopinfo unroll for i in eachindex(x) f(x[i]) end return nothing end @device_override function map!(f::F, x::MoYeArray, y::MoYeArray) where {F} @loopinfo unroll for i in eachindex(x) x[i] = f(y[i]) end return nothing end @device_override @inline function map!(f::F, x::MoYeArray) where {F} @loopinfo unroll for i in eachindex(x) x[i] = f(x[i]) end return nothing end
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
10302
@inline function axpby!(a::Number, X::MoYeArray{T}, b::Number, Y::MoYeArray{T}) where {T} x, y = ManualMemory.preserve_buffer(X), ManualMemory.preserve_buffer(Y) vx, vy = ViewEngine(pointer(X)), ViewEngine(pointer(Y)) GC.@preserve x y begin @loopinfo unroll for i in eachindex(vx) @inbounds vy[i] = iszero(b) ? a * vx[i] : a * vx[i] + b * vy[i] end end end @inline gemm!(A::MoYeArray, B::MoYeArray, C::MoYeArray) = gemm!(C, A, B, C) @inline gemm!(mma::AbstractMMAAtom, A::MoYeArray, B::MoYeArray, C::MoYeArray) = gemm!(mma,C,A,B,C) function gemm!(D::MoYeArray{TD}, A::MoYeArray{TA}, B::MoYeArray{TB}, C::MoYeArray{TC}) where {TD,TA,TB,TC} @inline return gemm!(MMAAtom{UniversalFMA{TD,TA,TB,TC}}(), D, A, B, C) end # element-wise multiplication (1,1,1,1) function gemm!(mma_atom::AbstractMMAAtom, D::LocalArray{DT,1}, A::LocalArray{DA,1}, B::LocalArray{DB,1}, C::LocalArray{DC,1}) where {DT,DA,DB,DC} @inline apply(mma_atom, D, A, B, C) return nothing end # outer product (2,1,1,2) -> (2,2,2,2) @generated function gemm!(mma_atom::AbstractMMAAtom, D::LocalArray{DT,2}, A::LocalArray{DA,1}, B::LocalArray{DB,1}, C::LocalArray{DC,2}) where {DT,DA,DB,DC} @assert size(layout(A), _1) == size(layout(C), _1) == size(layout(D), _1) # M @assert size(layout(B), _1) == size(layout(C), _2) == size(layout(D), _2) # N return quote Base.@_inline_meta gemm!(mma_atom, D, append_dim(A, _2), append_dim(B, _2), C) end end # matrix multiplication (2,2,2,2) -> (3,3,3,3) @generated function gemm!(mma_atom::AbstractMMAAtom, D::LocalArray{DT,2}, A::LocalArray{DA,2}, B::LocalArray{DB,2}, C::LocalArray{DC,2}) where {DT,DA,DB,DC} @assert size(layout(A), _1) == size(layout(C), _1) == size(layout(D), _1) # M @assert size(layout(B), _1) == size(layout(C), _2) == size(layout(D), _2) # N @assert size(layout(A), _2) == size(layout(B), _2) # K return quote Base.@_inline_meta gemm!(mma_atom, prepend_dim(D, _3), prepend_dim(A, _3), prepend_dim(B, _3), prepend_dim(C, _3)) end end # batched outer product (3,2,2,3) -> (1,1,1,1) @generated function gemm!(mma_atom::AbstractMMAAtom, D::LocalArray{DT,3}, A::LocalArray{DA,2}, B::LocalArray{DB,2}, C::LocalArray{DC,3}) where {DT,DA,DB,DC} @assert size(layout(A), _2) == size(layout(C), _2) == size(layout(D), _2) # M @assert size(layout(B), _2) == size(layout(C), _3) == size(layout(D), _3) # N @assert size(layout(C), _1) == size(layout(D), _1) M = size(layout(A), _2) return quote Base.@_inline_meta @loopinfo unroll for n in axes(B, 2) @loopinfo unroll for m in axes(A, 2) ms = Bool(n & 1) ? 
m : $(M()+_1)-m gemm!(mma_atom, view(D, :, ms, n), view(A, :, ms), view(B, :, n), view(C, :, ms, n)) end end return nothing end end # batched matrix multiplication (3,3,3,3) -> (3,2,2,3) @generated function gemm!(mma_atom::AbstractMMAAtom, D::LocalArray{DT,3}, A::LocalArray{DA,3}, B::LocalArray{DB,3}, C::LocalArray{DC,3}) where {DT,DA,DB,DC} @assert size(layout(A), _2) == size(layout(C), _2) == size(layout(D), _2) # M @assert size(layout(B), _2) == size(layout(C), _3) == size(layout(D), _3) # N @assert size(layout(A), _3) == size(layout(B), _3) # K @assert size(layout(C), _1) == size(layout(D), _1) return quote Base.@_inline_meta @loopinfo unroll for k in axes(A, 3) gemm!(mma_atom, D, view(A, :, :, k), view(B, :, :, k), C) end return nothing end end # (2,2,2,2) -> (3,3,3,3) @generated function gemm!(mma_atom::AbstractMMAAtom, D::LocalArray{DT, 2}, A::SharedArray{DA, 2}, B::SharedArray{DB, 2}, C::LocalArray{DC, 2}) where {DT,DA,DB,DC} @assert size(layout(A), _1) == size(layout(C), _1) == size(layout(D), _1) # M @assert size(layout(B), _1) == size(layout(C), _2) == size(layout(D), _2) # N @assert size(layout(A), _2) == size(layout(B), _2) # K @assert size(layout_a(mma_atom()), _2) == One() @assert size(layout_b(mma_atom()), _2) == One() @assert size(layout_c(mma_atom()), _2) == One() return quote gemm!(mma_atom, prepend_dim(D, _3), prepend_dim(A, _3), prepend_dim(B, _3), prepend_dim(C, _3)) end end # (3,3,3,3) -> (3,2,2,3) @generated function gemm!(mma_atom::AbstractMMAAtom, D::LocalArray{DT,3}, A::SharedArray{DA,3}, B::SharedArray{DB,3}, C::LocalArray{DC,3}) where {DT, DA, DB, DC} @assert size(layout(A), _2) == size(layout(C), _2) == size(layout(D), _2) # M @assert size(layout(B), _2) == size(layout(C), _3) == size(layout(D), _3) # N @assert size(layout(A), _3) == size(layout(B), _3) # K @assert size(layout(C), _1) == size(layout(D), _1) return quote Base.@_inline_meta rA = make_fragment_A(mma_atom, A) rB = make_fragment_B(mma_atom, B) @loopinfo unroll for k in axes(A,3) _copyto!(view(rA, :, :, k), view(A, :, :, k)) _copyto!(view(rB, :, :, k), view(B, :, :, k)) gemm!(mma_atom, D, view(rA, :, :, k), view(rB, :, :, k), C) end return nothing end end function gemm!(thr_mma::ThrMMA, alpha, A::SharedArray{TA,2}, B::SharedArray{TB,2}, beta, C::SharedArray{TC,2}, transform_A, transform_B) where {TA,TB,TC} @assert size(layout(A), 1) == size(layout(C), 1) # M @assert size(layout(B), 1) == size(layout(C), 2) # N @assert size(layout(A), 2) == size(layout(B), 2) # K @assert Core.Compiler.return_type(transform_A, Tuple{TA}) == TA @assert Core.Compiler.return_type(transform_B, Tuple{TB}) == TB M = size(layout(C), 1) N = size(layout(C), 2) K = size(layout(A), 2) BLK_M = tile_size(thr_mma, 1) BLK_N = tile_size(thr_mma, 2) BLK_K = tile_size(thr_mma, 3) m_residue = M - BLK_M * (cld(M, BLK_M) - One()) n_residue = N - BLK_N * (cld(N, BLK_N) - One()) k_residue = K - BLK_K * cld(K, BLK_K) sA = MoYeArray(pointer(A, (1, k_residue+1)), layout(A)) sB = MoYeArray(pointer(B, (1, k_residue+1)), layout(B)) rounded_sA = sA ∘ (cld(M, BLK_M) * BLK_M, cld(K, BLK_K) * BLK_K) rounded_sB = sB ∘ (cld(N, BLK_N) * BLK_N, cld(K, BLK_K) * BLK_K) rounded_sC = sC ∘ (cld(M, BLK_M) * BLK_M, cld(N, BLK_N) * BLK_N) thr_A = partition_A(thr_mma, rounded_sA) thr_B = partition_B(thr_mma, rounded_sB) thr_C = partition_C(thr_mma, rounded_sC) rA = make_fragment_A(thr_mma, thr_A) rB = make_fragment_B(thr_mma, thr_B) rC = make_fragment_C(thr_mma, thr_C) # predication thr_pA = MoYeArray{Bool}(undef, static_size(thr_A, 1)) thr_pB = 
MoYeArray{Bool}(undef, static_size(thr_B, 1)) cA = make_identity_array((BLK_M, BLK_K)) cB = make_identity_array((BLK_N, BLK_K)) thr_cA = partition_A(thr_mma, cA) thr_cB = partition_B(thr_mma, cB) @loopinfo unroll for i in size(layout(thr_pA)) @inbounds thr_pA[i] = elem_less(thr_cA[i][1], m_residue) end @loopinfo unroll for i in size(layout(thr_pB)) @inbounds thr_pB[i] = elem_less(thr_cB[i][1], n_residue) end # load A @loopinfo unroll for i in axes(thr_A, 1) if k_residue == Zero() || thr_cA[i][2] > -k_residue @loopinfo unroll for m in axes(thr_A, 2) @inbounds rA[i, m, 1] = (m_residue == BLK_M || m <= static_size(thr_A, 2) || thr_pA[i]) ? transform_A(thr_A[i, m, 1]) : zero(TA) end end end # load B @loopinfo unroll for i in axes(thr_B, 1) if k_residue == Zero() || thr_cB[i][2] > -k_residue @loopinfo unroll for n in axes(thr_B, 2) @inbounds rB[i, n, 1] = (n_residue == BLK_N || n <= static_size(thr_B, 2) || thr_pB[i]) ? transform_B(thr_B[i, n, 1]) : zero(TB) end end end zeros!(rC) K_BLK_MAX = static_size(thr_A, 3) @loopinfo unroll for k_blk in 1:K_BLK_MAX if k_blk < K_BLK_MAX k_next = k_blk + 1 @loopinfo unroll for m in axes(thr_A, 2) @loopinfo unroll for i in axes(thr_A, 1) rA[i, m, k_next] = (m_residue == BLK_M || m <= static_size(thr_A, 2) || thr_pA[i]) ? transform_A(thr_A[i, m, k_next]) : zero(TA) end end @loopinfo unroll for n in axes(thr_B, 2) @loopinfo unroll for i in axes(thr_B, 1) rB[i, n, k_next] = (n_residue == BLK_N || n <= static_size(thr_B, 2) || thr_pB[i]) ? transform_B(thr_B[i, n, k_next]) : zero(TB) end end gemm!(thr_mma, view(rA, :, :, k_blk), view(rB, :, :, k_blk), rC) end end ############ # Epilogue # ############ cC = make_identity_array((BLK_M, BLK_N)) thr_cC = partition_C(thr_mma, cC) is_beta_zero = iszero(beta) @loopinfo unroll for m in axes(thr_C, 2) @loopinfo unroll for n in axes(thr_C, 3) @loopinfo unroll for i in axes(thr_C, 1) if (m_residue == BLK_M || m <= static_size(thr_C, 2) || thr_cC[i][1] <= m_residue) && (n_residue == BLK_N || n <= static_size(thr_C, 3) || thr_cC[i][2] <= n_residue) @inbounds thr_C[i, m, n] = is_beta_zero ? alpha * rC[i, m, n] : alpha * rC[i, m, n] + beta * thr_C[i, m, n] end end end end return nothing end function gemm!(thr_mma::ThrMMA, alpha, A::SharedArray{TA,2}, B::SharedArray{TB,2}, beta, C::SharedArray{TC,2}) where {TA,TB,TC} gemm!(thr_mma, alpha, A, B, beta, C, identity, identity) end
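Note the operand convention inherited from CuTe: `A` is `(M, K)`, `B` is `(N, K)` and `C` is `(M, N)`, so the fallback `UniversalFMA` path computes `C[m, n] += Σₖ A[m, k] * B[n, k]`. A host-side sketch of the simplest entry point (an assumption here is that plain host views dispatch as register-resident `LocalArray` operands, which is how non-GPU pointers are classified elsewhere in the package):

```julia
using MoYe

a = ones(Float64, 6);   A = MoYeArray(pointer(a), @Layout((2, 3)))   # (M, K) = (2, 3)
b = fill(2.0, 12);      B = MoYeArray(pointer(b), @Layout((4, 3)))   # (N, K) = (4, 3)
c = zeros(Float64, 8);  C = MoYeArray(pointer(c), @Layout((2, 4)))   # (M, N) = (2, 4)

gemm!(A, B, C)   # C[m, n] += Σₖ A[m, k] * B[n, k]; every entry of c should become 6.0
```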
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
5853
struct TrivialPred end @inline (::TrivialPred)(i) = true function _copyto_if!(dest::NonOwningArray, src::NonOwningArray, mask) copy_op = select_elementwise_copy(src, dest) # would select async copy if dest is shared memory and src is global memory @loopinfo unroll for i in _1:size(src.layout) if mask(i) apply(copy_op, pointer(dest, i), pointer(src, i)) end end return dest end function copyto_if!(dest::MoYeArray, src::MoYeArray, mask) @gc_preserve _copyto_if!(dest, src, mask) end @generated function _copyto_vec!(dest::MoYeArray{TD}, src::MoYeArray{TS}, ::Type{TV}) where {TD,TS,TV} if (sizeof(TD) == sizeof(TS)) && sizeof(TV) > sizeof(TD) return quote Base.@_inline_meta src_v = recast(TV, src) dest_v = recast(TV, dest) return _copyto_if!(dest_v, src_v, TrivialPred()) end else return quote Base.@_inline_meta return _copyto_if!(dest, src, TrivialPred()) end end end """ copyto!(dest::MoYeArray, src::MoYeArray) Copy the contents of `src` to `dest`. The function automatically carries out potential vectorization. In particular, while transferring data from global memory to shared memory, it automatically initiates asynchronous copying, if your device supports so. """ function Base.copyto!(dest::MoYeArray, src::MoYeArray) @inline @gc_preserve _copyto!(dest, src) return dest end function _copyto!(dest::NonOwningArray, src::NonOwningArray) @inline return _copyto!(dest, src, _8) end function _copyto!(dest::NonOwningArray{TD}, src::NonOwningArray{TS}, align::StaticInt{N}) where {TD,TS, N} vec_elem = max_common_vector(src, dest) src_bits = sizeof(TS) * 8 #vec_bits = is_static(layout(src)) && is_static(layout(dest)) ? # min(vec_elem * src_bits, 128) : # min(vec_elem * src_bits, N) vec_bits = 8 # explicitly disable vectorization for now if vec_elem > 1 && vec_bits > 8 return _copyto_vec!(dest, src, uint_bit(static(vec_bits))) else return _copyto_if!(dest, src, TrivialPred()) end end @inline group_tail(l::Layout{2}) = l @inline group_tail(l::Layout{N}) where {N} = group(l, _2, StaticInt{N}()) @inline group_tail(l::Tuple{Vararg{Union{IntType, Tuple}, 2}}) = l @inline group_tail(l::Tuple{Vararg{Union{IntType, Tuple}, N}}) where {N} = (Base.first(l), Base.tail(l)) function generate_copy_atom_loops(dst, src, dst_shape, src_shape, n_src, n_dst, d=1) expr = Expr(:block) dst_v = Symbol(:dst_v_, d) src_v = Symbol(:src_v_, d) loop_var = Symbol(:i_, d) grouped_dst_shape = group_tail(dst_shape) grouped_src_shape = group_tail(src_shape) push!(expr.args, :($dst_v = MoYeArray(pointer($dst), group_tail(layout($dst)[_1])))) push!(expr.args, :($src_v = MoYeArray(pointer($src), group_tail(layout($src)[_1])))) loop = Expr(:for, :($loop_var = _1:$(product(grouped_dst_shape[2])))) loopbody = Expr(:block) sliced_dst = Symbol(:sliced_dst_, d) sliced_src = Symbol(:sliced_src_, d) push!(loopbody.args, :($sliced_dst = view($dst_v, :, $loop_var))) push!(loopbody.args, :($sliced_src = view($src_v, :, $loop_var))) # here we use the fact that each slice has the same layout sliced_layout_dst = slice(grouped_dst_shape, (:, 1)) sliced_layout_src = slice(grouped_src_shape, (:, 1)) if typeof(product(grouped_dst_shape[1])) == n_dst || typeof(product(grouped_src_shape[1])) == n_src push!(loopbody.args, :(copyto_unpack!(copy_atom, $sliced_dst, $sliced_src))) else new_layout_dst = grouped_dst_shape[1] new_layout_src = grouped_src_shape[1] push!(loopbody.args, generate_copy_atom_loops(sliced_dst, sliced_src, new_layout_dst, new_layout_src, n_src, n_dst, d+1)) end push!(loopbody.args, :($(Expr(:loopinfo, (Symbol("llvm.loop.unroll.enable"), 1))))) 
push!(loop.args, loopbody) push!(expr.args, loop) return expr end function Base.copyto!(copy_atom::AbstractCopyAtom, dst::MoYeArray, src::MoYeArray) @inline @gc_preserve _copyto!(copy_atom, dst, src) return dst end function _copyto!(copy_atom::AbstractCopyAtom, dst::NonOwningArray, src::NonOwningArray) @inline return _copyto!(copy_atom, dst, src, TrivialPred()) end @generated function _copyto!(copy_atom::AbstractCopyAtom, dst::NonOwningArray{TD,N}, src::NonOwningArray{TS, N}, pred) where {TD, TS, N} expr = generate_copy_atom_loops(:sliced_dst, :sliced_src, make_tuple(shape(layout(dst)[_1])), make_tuple(shape(layout(src)[_1])), num_val_src(copy_atom), num_val_dst(copy_atom)) return quote dst_v = group_modes(dst, _2, StaticInt{$N}()) src_v = group_modes(src, _2, StaticInt{$N}()) @loopinfo unroll for i in 1:size(layout(src_v), 2) if pred(i) sliced_dst = view(dst_v, :, i) sliced_src = view(src_v, :, i) $expr end end return dst end end #Base.@assume_effects :foldable function _copyto!(copy_atom::AbstractCopyAtom, dst::NonOwningArray{TD, 1}, src::NonOwningArray{TS, 1}, pred) where {TD, TS} # @inline # apply(copy_atom, dst, src) #end #Base.@assume_effects :foldable function _copyto!(copy_atom::AbstractCopyAtom, dst::NonOwningArray{TD, N}, src::NonOwningArray{TS, N}, pred) where {TD, TS, N} # src_v = group_modes(src, _2, StaticInt{N}()) # dst_v = group_modes(dst, _2, StaticInt{N}()) # for i in 1:size(src_v.layout, 2) # if pred(i) # apply(copy_atom, view(dst_v, :, i), view(src_v, :, i)) # end # end #end
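Because `_copyto!` walks the two layouts coordinate by coordinate (vectorisation is explicitly disabled above), `copyto!` can move data between differently-strided views of the same shape. A host-side sketch (assuming host pointers select the plain `UniversalCopy` path):

```julia
using MoYe

src_data = collect(1.0:6.0)
dst_data = zeros(6)

src = MoYeArray(pointer(src_data), @Layout((2, 3)))          # column-major view
dst = MoYeArray(pointer(dst_data), @Layout((2, 3), (3, 1)))  # row-major view of dst_data

copyto!(dst, src)   # logical element (i, j) of dst gets logical element (i, j) of src
dst_data            # expected: [1.0, 3.0, 5.0, 2.0, 4.0, 6.0]
```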
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
7398
# we don't overload Base.front, the following finds the first non-tuple element @inline front(@nospecialize(t::Tuple)) = front(first(t)) @inline front(@nospecialize(x::Type{<:Tuple})) = front(first(x.parameters)) @inline front(x) = x @inline back(@nospecialize(t::Tuple)) = back(getindex(t, length(t))) @inline back(@nospecialize(x::Type{<:Tuple})) = back(getindex(x.parameters, length(x.parameters))) @inline back(x) = x @inline unwrap(@nospecialize(t::Tuple)) = isone(nfields(t)) ? unwrap(first(t)) : t @inline unwrap(x) = x @inline flatten_to_tuple(@nospecialize x::NTuple{N, Union{Int, StaticInt, Colon}}) where {N} = x @inline flatten_to_tuple(@nospecialize x::Tuple) = (flatten_to_tuple(first(x))..., flatten_to_tuple(Base.tail(x))...) @inline flatten_to_tuple(x) = tuple(x) # recursive flatten @inline flatten(@nospecialize x::NTuple{N, Union{Int, StaticInt, Colon }}) where {N} = x @inline flatten(@nospecialize x::Tuple) = (flatten(first(x))..., flatten(Base.tail(x))...) @inline flatten(x) = x tuple_cat(x) = x tuple_cat(x, y, z...) = (x..., tuple_cat(y, z...)...) function unflatten_impl(flat_tuple::Tuple, target::Tuple) return _foldl(target, ((), flat_tuple)) do v, t result, remaining_tuple = v sub_result, sub_tuple = unflatten_impl(remaining_tuple, t) (append(result, sub_result), sub_tuple) end end unflatten_impl(flat_tuple::Tuple, target) = (Base.first(flat_tuple), Base.tail(flat_tuple)) function unflatten(flat_tuple::Tuple, target::Union{IntType, Tuple}) unflatten_tuple, flat_remidner = unflatten_impl(flat_tuple, target) return unflatten_tuple end function insert(@nospecialize(t::Tuple), x, N) return (getindex(t, Base.OneTo(N - one(N)))..., x, getindex(t, N:length(t))...) end @generated function remove(x::Tuple, ::StaticInt{N}) where {N} M = length(x.parameters) M < N && return :x M == N && return :(Base.front(x)) f = ntuple(i-> :(x[$i]), N-1) t = ntuple(i-> :(x[$(i+N)]), M-N) return quote ($(f...), $(t...)) end end function Base.replace(@nospecialize(t::Tuple), x, N) return (getindex(t, Base.OneTo(N - one(N)))..., x, getindex(t, UnitRange(N + one(N), length(t)))...) end @inline function replace_front(@nospecialize(t::Tuple), v) return (v, Base.tail(t)...) end @inline replace_front(t, v) = v @generated function replace_front(::Type{T}, ::Type{V}) where {T<:Tuple,V} expr = Expr(:curly, Tuple) push!(expr.args, V) push!(expr.args, T.parameters[2:end]...) return expr end @inline function replace_back(@nospecialize(t::Tuple), v) return (Base.front(t)..., v) end @inline replace_back(t, v) = v @inline function Base.repeat(x, n) return ntuple(Returns(x), n) end @inline repeat_like(t, x) = x @inline function repeat_like(t::Tuple, x) return repeat_like(typeof(t), x) end function repeat_inner(expr, T) for i in T.parameters if i <: IntType push!(expr.args, :x) elseif i <: Tuple push!(expr.args, repeat_inner(Expr(:tuple), i)) end end return expr end @generated function repeat_like(::Type{T}, x) where {T<:Tuple} return repeat_inner(Expr(:tuple), T) end @generated function group(t::Tuple, ::StaticInt{B}, ::StaticInt{E}) where {B,E} return quote @inbounds (t[1:$B-1]..., t[$B:$E], t[$E+1:end]...) end end # Group the elements [B,E] of a T into a single element function group(t::Tuple, b, e) return (getindex(t, Base.OneTo(b - one(b)))..., getindex(t, UnitRange(b, e)), getindex(t, UnitRange(e + one(e), length(t)))...) 
end # append x to extend t to rank N @inline function append(t::Union{Tuple, IntType}, val, ::StaticInt{N}) where {N} M = length(t) M > N && throw(ArgumentError(LazyString("input tuple of length ", M, ", requested ", N))) if @generated quote (t..., $(fill(:val, N - length(t.parameters))...)) end else (t..., ntuple(Returns(val), N-M)...) end end @generated function append(::Type{T}, ::Type{X}) where {T<:Tuple, X} expr = Expr(:curly, Tuple) push!(expr.args, T.parameters...) push!(expr.args, X) return expr end @generated function append(::Type{T}, ::Type{X}) where {T, X} expr = Expr(:curly, Tuple) push!(expr.args, T) push!(expr.args, X) return expr end function append(t::Tuple, x) @inline return (t..., x) end function append(t::IntType, x::IntType) @inline return (t, x) end @inline function prepend(t::Union{Tuple, IntType}, val, ::StaticInt{N}) where {N} M = length(t) M > N && throw(ArgumentError(LazyString("input tuple of length ", M, ", requested ", N))) if @generated quote ($(fill(:val, N - length(t.parameters))...), t...) end else (ntuple(Returns(val), N-M)..., t...) end end function prepend(t::Tuple, x) @inline return (x, t...) end @generated function prepend(::Type{T}, ::Type{X}) where {T<:Tuple, X} expr = Expr(:curly, Tuple) push!(expr.args, X) push!(expr.args, T.parameters...) return expr end @generated function prepend(::Type{T}, ::Type{X}) where {T, X} expr = Expr(:curly, Tuple) push!(expr.args, X) push!(expr.args, T) return expr end function prepend(t::IntType, x::IntType) @inline return (x, t) end # specialize on the operation Base.@assume_effects :total @generated function _foldl(op::G, x::Tuple, init) where {G} length(x.parameters) == 0 && return :init expr = :(op(init, x[1])) for i in 2:length(x.parameters) expr = :(op($expr, x[$i])) end return quote Base.@_inline_meta @inbounds $expr end end _foldl(op::G, x::IntType, init) where {G} = op(init, x) Base.@assume_effects :total @generated function escan(f::F, x::NTuple{N, T}, init::T) where {F, N, T} q = Expr(:block, Expr(:meta, :inline, :propagate_inbounds)) if N == 1 push!(q.args, :init) return q end syms = ntuple(i -> Symbol(:i_, i), N) push!(q.args, Expr(:(=), syms[1], :init)) for n in 1:(N - 1) push!(q.args, Expr(:(=), syms[n + 1], Expr(:call, :f, syms[n], Expr(:ref, :x, n)))) end push!(q.args, Expr(:return, Expr(:tuple, syms...))) return q end @inline _zip(t::Tuple{Vararg{Tuple}}) = tuple(zip(t...)...) @inline _zip(t::Tuple) = tuple(t) @inline _zip(t) = t @inline _zip(t1, t2, t3...) = _zip((t1, t2, t3...)) function zip2_by(t, guide::Tuple) TR = length(t) GR = length(guide) GR <= TR || throw(ArgumentError("zip2_by: guide tuple is longer than input tuple")) split = map(zip2_by, t[1:GR], guide[1:GR]) result = tuple(zip(split...)...) return (result[1], (result[2]..., t[(GR + 1):end]...)) end function zip2_by(t, guide) @assert length(t) == 2 return t end @generated hascolon(::T) where T = :($(Colon ∈ T.parameters)) @generated hascolon(::Type{T}) where T = :($(Colon ∈ T.parameters)) @generated function Base.reverse(::Type{T}) where {T<:Tuple} expr = Expr(:curly, Tuple) push!(expr.args, Core._apply_iterate(Base.iterate, Base.revargs, T.parameters)...) return expr end
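Most of these helpers exist to reshape the nested int-tuples that encode shapes and strides. A few representative calls (internal functions, hence the `MoYe.` qualification; `_4` is the exported static constant):

```julia
using MoYe

t = ((1, (2, 3)), 4)

flat = MoYe.flatten(t)            # (1, 2, 3, 4)
MoYe.unflatten(flat, t)           # back to ((1, (2, 3)), 4)

MoYe.append((1, 2), 0, _4)        # pad to rank 4: (1, 2, 0, 0)
MoYe.prepend((1, 2), 0, _4)       # (0, 0, 1, 2)
MoYe.group((1, 2, 3, 4), 2, 3)    # group modes 2:3 into one mode: (1, (2, 3), 4)
```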
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
1816
abstract type AbstractCopyOp{SRegisters, DRegisters} <: PTXOperation end function Base.getproperty(obj::AbstractCopyOp{SRegisters, DRegisters}, sym::Symbol) where {SRegisters, DRegisters} if sym === :DRegisters return DRegisters elseif sym === :SRegisters return SRegisters else return getfield(obj,sym) end end function Base.propertynames(::AbstractCopyOp) return (:SRegisters, :DRegisters) end # default implementation, 1 value per thread function Base.copyto!(op::AbstractCopyOp, dest::MoYeArray, src::MoYeArray) op(pointer(dest), pointer(src)) return dest end @inline Adapt.adapt(to, x::AbstractCopyOp) = x struct UniversalCopy{TS, TD} <: AbstractCopyOp{Registers{TS, 1}, Registers{TD, 1}} end @inline UniversalCopy{S}() where {S} = UniversalCopy{S,S}() function (::UniversalCopy{TS, TD})(dest::LLVMPtr{TD}, src::LLVMPtr{TS}) where {TS, TD} @inline align_src = Base.datatype_alignment(TS) align_dst = Base.datatype_alignment(TD) return unsafe_store!(dest, unsafe_load(src, 1, Val(align_src)), 1, Val(align_dst)) end # the following methods should be moved if LocalArray has an address space function (::UniversalCopy{TS, TD})(dest::Ptr{TD}, src::Ptr{TS}) where {TS, TD} @inline src = recast(TS, src) dest = recast(TD, dest) return unsafe_store!(dest, unsafe_load(src)) end function (::UniversalCopy{TS, TD})(dest::Ptr{TD}, src::LLVMPtr{TS}) where {TS, TD} @inline return unsafe_store!(dest, unsafe_load(src, 1, Val(Base.datatype_alignment(TS)))) end function (::UniversalCopy{TS, TD})(dest::LLVMPtr{TD}, src::Ptr{TS}) where {TS, TD} @inline return unsafe_store!(dest, unsafe_load(src), 1, Val(Base.datatype_alignment(TD))) end
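# Host-side sketch (illustrative data, not from the package source): exercises the
# plain-Ptr methods above, assuming `recast` accepts `Ptr`s as those methods imply.
op = UniversalCopy{Float32}()    # TS == TD == Float32, one value per call
src = Float32[1.5f0]
dst = Float32[0.0f0]
GC.@preserve src dst op(pointer(dst), pointer(src))
# dst[1] == 1.5f0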
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
2273
abstract type AbstractCopyOp_ASYNC{TS,TD} <: AbstractCopyOp{Registers{TS, 1}, Registers{TD, 1}} end const CP_SYNC_ENABLED = @static if CUDA.functional() && capability(device()) >= v"8.0" true else false end struct CPOP_ASYNC_CACHEALWAYS{TS, TD} <: AbstractCopyOp_ASYNC{TS,TD} @generated function CPOP_ASYNC_CACHEALWAYS{TS, TD}() where {TS, TD} @assert sizeof(TS) == sizeof(TD) @assert sizeof(TS) in (4, 8, 16) "Only 4, 8, 16 bytes are supported, got $(sizeof(TS))" return :($(new{TS, TD}())) end end @inline CPOP_ASYNC_CACHEALWAYS{S}() where {S} = CPOP_ASYNC_CACHEALWAYS{S,S}() @generated function (::CPOP_ASYNC_CACHEALWAYS{TS, TD})(dst::LLVMPtr{TD, AS.Shared}, src::LLVMPtr{TS, AS.Global}) where {TD, TS} intr = "llvm.nvvm.cp.async.ca.shared.global.$(sizeof(TS))" return quote Base.@_inline_meta ccall($intr, llvmcall, Cvoid, (LLVMPtr{TD, AS.Shared}, LLVMPtr{TS, AS.Global}), dst, src) end end struct CPOP_ASYNC_CACHEGLOBAL{TS, TD} <: AbstractCopyOp_ASYNC{TS,TD} @generated function CPOP_ASYNC_CACHEGLOBAL{TS, TD}() where {TS, TD} @assert sizeof(TS) == sizeof(TD) @assert sizeof(TS) in (16, ) "Only 16 bytes are supported, got $(sizeof(TS))" # only 16 for LLVM 15 return :($(new{TS, TD}())) end end @inline CPOP_ASYNC_CACHEGLOBAL{S}() where {S} = CPOP_ASYNC_CACHEGLOBAL{S,S}() @generated function (::CPOP_ASYNC_CACHEGLOBAL{TS, TD})(dst::LLVMPtr{TD, AS.Shared}, src::LLVMPtr{TS, AS.Global}) where {TS, TD} intr = "llvm.nvvm.cp.async.cg.shared.global.$(sizeof(TS))" return quote Base.@_inline_meta ccall($intr, llvmcall, Cvoid, (LLVMPtr{TD, AS.Shared}, LLVMPtr{TS, AS.Global}), dst, src) end end """ cp_async_wait(N::Int32) cp_async_wait() `cp_async_wait(N)` is equivalent to `cp.async.wait.group(N)` and `cp_async_wait()` is equivalent to `cp.async.wait.all` in CUDA. """ @inline cp_async_wait(i::Int32) = ccall("llvm.nvvm.cp.async.wait.group", llvmcall, Cvoid, (Int32,), i) @inline cp_async_wait() = ccall("llvm.nvvm.cp.async.wait.all", llvmcall, Cvoid, ()) """ cp_async_commit() `cp.async.commit.group`. """ @inline cp_async_commit() = ccall("llvm.nvvm.cp.async.commit.group", llvmcall, Cvoid, ())
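# Device-side sketch of the usual cp.async sequence built from the ops above.
# `smem_ptr` and `gmem_ptr` are assumed: shared- and global-memory LLVMPtr{Float32}
# values already computed inside a kernel on sm_80 or newer (CP_SYNC_ENABLED).
op = CPOP_ASYNC_CACHEALWAYS{Float32}()   # 4-byte global -> shared transfer
op(smem_ptr, gmem_ptr)                   # issue one asynchronous copy
cp_async_commit()                        # close the current async-copy group
cp_async_wait()                          # wait for all outstanding groups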
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
5458
abstract type AbstractLdMatrix{SRegisters, DRegisters} <: AbstractCopyOp{SRegisters, DRegisters} end function Base.getproperty(obj::AbstractLdMatrix{SRegisters, DRegisters}, sym::Symbol) where {SRegisters, DRegisters} if sym === :DRegisters return DRegisters elseif sym === :SRegisters return SRegisters else return getfield(obj,sym) end end function Base.propertynames(::AbstractLdMatrix) return (:SRegisters, :DRegisters) end struct LDSM_U32x1_N <: AbstractLdMatrix{Registers{UInt128, 1}, Registers{UInt32, 1}} end struct LDSM_U32x2_N <: AbstractLdMatrix{Registers{UInt128, 1}, Registers{UInt32, 2}} end struct LDSM_U32x4_N <: AbstractLdMatrix{Registers{UInt128, 1}, Registers{UInt32, 4}} end struct LDSM_U16x2_T <: AbstractLdMatrix{Registers{UInt128, 1}, Registers{UInt32, 1}} end struct LDSM_U16x4_T <: AbstractLdMatrix{Registers{UInt128, 1}, Registers{UInt32, 2}} end struct LDSM_U16x8_T <: AbstractLdMatrix{Registers{UInt128, 1}, Registers{UInt32, 4}} end @inline function (::LDSM_U32x1_N)(src_addr::LLVMPtr{UInt32, AS.Shared}) return ccall("llvm.nvvm.ldmatrix.sync.aligned.m8n8.x1.b16", llvmcall, UInt32, (LLVMPtr{UInt32, AS.Shared},), src_addr) end @inline function (::LDSM_U32x2_N)(src_addr::LLVMPtr{UInt32, AS.Shared}) return ccall("llvm.nvvm.ldmatrix.sync.aligned.m8n8.x2.b16", llvmcall, LLVMStruct2{UInt32}, (LLVMPtr{UInt32, AS.Shared},), src_addr) end @inline function (::LDSM_U32x4_N)(src_addr::LLVMPtr{UInt32, AS.Shared}) return ccall("llvm.nvvm.ldmatrix.sync.aligned.m8n8.x4.b16", llvmcall, LLVMStruct4{UInt32}, (LLVMPtr{UInt32, AS.Shared},), src_addr) end @inline function (::LDSM_U16x2_T)(src_addr::LLVMPtr{UInt32, AS.Shared}) return ccall("llvm.nvvm.ldmatrix.sync.aligned.m8n8.x1.trans.b16", llvmcall, UInt32, (LLVMPtr{UInt32, AS.Shared},), src_addr) end @inline function (::LDSM_U16x4_T)(src_addr::LLVMPtr{UInt32, AS.Shared}) return ccall("llvm.nvvm.ldmatrix.sync.aligned.m8n8.x2.trans.b16", llvmcall, LLVMStruct2{UInt32}, (LLVMPtr{UInt32, AS.Shared},), src_addr) end @inline function (::LDSM_U16x8_T)(src_addr::LLVMPtr{UInt32, AS.Shared}) return ccall("llvm.nvvm.ldmatrix.sync.aligned.m8n8.x4.trans.b16", llvmcall, LLVMStruct4{UInt32}, (LLVMPtr{UInt32, AS.Shared},), src_addr) end function Base.copyto!(op::LDSM_U32x1_N, dest::LocalArray{UInt32}, src::SharedArray{UInt128}) @inline src_ptr = pointer(src) val = op(recast(UInt32, src_ptr)) return unsafe_store!(pointer(dest), val, 1) end @inbounds function Base.copyto!(op::LDSM_U32x2_N, dest::LocalArray{UInt32}, src::SharedArray{UInt128}) @inline src_ptr = pointer(src) val = op(recast(UInt32, src_ptr)) Base.Cartesian.@nexprs 2 i -> dest[i] = getfield(val, i) return dest end @inbounds function Base.copyto!(op::LDSM_U32x4_N, dest::LocalArray{UInt32}, src::SharedArray{UInt128}) @inline src_ptr = pointer(src) val = op(recast(UInt32, src_ptr)) Base.Cartesian.@nexprs 4 i -> dest[i] = getfield(val, i) return dest end function Base.copyto!(op::LDSM_U16x2_T, dest::LocalArray{UInt32}, src::SharedArray{UInt128}) @inline src_ptr = pointer(src) val = op(recast(UInt32, src_ptr)) return unsafe_store!(pointer(dest), val, 1) end @inbounds function Base.copyto!(op::LDSM_U16x4_T, dest::LocalArray{UInt32}, src::SharedArray{UInt128}) @inline src_ptr = pointer(src) val = op(recast(UInt32, src_ptr)) dest_ptr = pointer(dest) Base.Cartesian.@nexprs 2 i -> dest[i] = getfield(val, i) return dest end @inbounds function Base.copyto!(op::LDSM_U16x8_T, dest::LocalArray{UInt32}, src::SharedArray{UInt128}) @inline src_ptr = pointer(src) val = op(recast(UInt32, src_ptr)) dest_ptr = pointer(dest) 
Base.Cartesian.@nexprs 4 i -> dest[i] = getfield(val, i) return dest end """ copyto!(ldmatrix::AbstractLdMatrix, dest::MoYeArray{UInt32}, src::MoYeArray{UInt128}) Load data from shared memory to registers. The available `AbstractLdMatrix`s are: ```julia # Type => LLVM intrinsic "LDSM_U32x1_N" => "llvm.nvvm.ldmatrix.sync.aligned.m8n8.x1.b16" "LDSM_U32x2_N" => "llvm.nvvm.ldmatrix.sync.aligned.m8n8.x2.b16" "LDSM_U32x4_N" => "llvm.nvvm.ldmatrix.sync.aligned.m8n8.x4.b16" "LDSM_U16x2_T" => "llvm.nvvm.ldmatrix.sync.aligned.m8n8.x1.trans.b16" "LDSM_U16x4_T" => "llvm.nvvm.ldmatrix.sync.aligned.m8n8.x2.trans.b16" "LDSM_U16x8_T" => "llvm.nvvm.ldmatrix.sync.aligned.m8n8.x4.trans.b16" ``` You can inspect the number and the type of registers used per thread by ```julia julia> LDSM_U32x4_N() LDSM_U32x4_N() julia> ans.DRegisters Registers{UInt32, 4} ``` """ function Base.copyto!(ldmatrix::AbstractLdMatrix, dest::MoYeArray, src::MoYeArray) throw(MethodError(copyto!, (ldmatrix, dest, src))) end const ldmatrix_ops_list = [ "LDSM_U32x1_N" => "llvm.nvvm.ldmatrix.sync.aligned.m8n8.x1.b16" "LDSM_U32x2_N" => "llvm.nvvm.ldmatrix.sync.aligned.m8n8.x2.b16" "LDSM_U32x4_N" => "llvm.nvvm.ldmatrix.sync.aligned.m8n8.x4.b16" "LDSM_U16x2_T" => "llvm.nvvm.ldmatrix.sync.aligned.m8n8.x1.trans.b16" "LDSM_U16x4_T" => "llvm.nvvm.ldmatrix.sync.aligned.m8n8.x2.trans.b16" "LDSM_U16x8_T" => "llvm.nvvm.ldmatrix.sync.aligned.m8n8.x4.trans.b16" ] export LDSM_U32x1_N, LDSM_U32x2_N, LDSM_U32x4_N, LDSM_U16x2_T, LDSM_U16x4_T, LDSM_U16x8_T
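# Sketch: inspect an ldmatrix op on the host; the actual load is device-only.
# `frag` and `smem` are assumed: a per-thread LocalArray{UInt32} of length 4 and a
# SharedArray{UInt128} tile whose address each thread of the warp passes in.
op = LDSM_U32x4_N()
op.SRegisters, op.DRegisters     # (Registers{UInt128, 1}, Registers{UInt32, 4})
# copyto!(op, frag, smem)        # device-only, one warp cooperating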
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
16036
""" fma!(::AbstractMMAOP, D, A, B, C) Perform matrix multiply-and-accumulate computation, `A*B+C`, and store the result in D. The available mma operations can be found in `MoYe.mma_ops_list`. ```julia julia> MoYe.mma_ops_list 51-element Vector{Any}: "MMAOP_8x8x4_F64F64F64F64_TN" => "llvm.nvvm.mma.m8n8k4.row.col.f64" "MMAOP_16x8x4_F32TF32TF32F32_TN" => "llvm.nvvm.mma.m16n8k4.row.col.tf32" "MMAOP_16x8x8_F32TF32TF32F32_TN" => "llvm.nvvm.mma.m16n8k8.row.col.tf32" "MMAOP_16x8x16_F32BF16BF16F32_TN" => "llvm.nvvm.mma.m16n8k16.row.col.bf16" "MMAOP_16x8x8_F32BF16BF16F32_TN" => "llvm.nvvm.mma.m16n8k8.row.col.bf16" "MMAOP_8x8x4_F16F16F16F16_TT" => "llvm.nvvm.mma.m8n8k4.row.row.f16.f16" "MMAOP_8x8x4_F16F16F16F16_NT" => "llvm.nvvm.mma.m8n8k4.col.row.f16.f16" "MMAOP_8x8x4_F16F16F16F16_TN" => "llvm.nvvm.mma.m8n8k4.row.col.f16.f16" "MMAOP_8x8x4_F16F16F16F16_NN" => "llvm.nvvm.mma.m8n8k4.col.col.f16.f16" "MMAOP_8x8x4_F32F16F16F16_TT" => "llvm.nvvm.mma.m8n8k4.row.row.f32.f16" "MMAOP_8x8x4_F32F16F16F16_NT" => "llvm.nvvm.mma.m8n8k4.col.row.f32.f16" "MMAOP_8x8x4_F32F16F16F16_TN" => "llvm.nvvm.mma.m8n8k4.row.col.f32.f16" "MMAOP_8x8x4_F32F16F16F16_NN" => "llvm.nvvm.mma.m8n8k4.col.col.f32.f16" "MMAOP_16x8x8_F16F16F16F16_TN" => "llvm.nvvm.mma.m16n8k8.row.col.f16.f16" ⋮ "MMAOP_16x8x16_S32U8S8S32_TN_SATURATE" => "llvm.nvvm.mma.m16n8k16.row.col.satfinite.u8.s8" "MMAOP_16x8x16_S32U8U8S32_TN" => "llvm.nvvm.mma.m16n8k16.row.col.u8" "MMAOP_16x8x16_S32U8U8S32_TN_SATURATE" => "llvm.nvvm.mma.m16n8k16.row.col.satfinite.u8" "MMAOP_16x8x32_S32U8S8S32_TN" => "llvm.nvvm.mma.m16n8k32.row.col.u8.s8" "MMAOP_16x8x32_S32U8S8S32_TN_SATURATE" => "llvm.nvvm.mma.m16n8k32.row.col.satfinite.u8.s8" "MMAOP_16x8x32_S32U8U8S32_TN" => "llvm.nvvm.mma.m16n8k32.row.col.u8" "MMAOP_16x8x32_S32U8U8S32_TN_SATURATE" => "llvm.nvvm.mma.m16n8k32.row.col.satfinite.u8" "MMAOP_8x8x128_S32B1B1S32_TN_XORPOPC" => "llvm.nvvm.mma.xor.popc.m8n8k128.row.col.b1" "MMAOP_8x8x128_S32B1B1S32_TN_ANDPOPC" => "llvm.nvvm.mma.and.popc.m8n8k128.row.col.b1" "MMAOP_16x8x128_S32B1B1S32_TN_XORPOPC" => "llvm.nvvm.mma.xor.popc.m16n8k128.row.col.b1" "MMAOP_16x8x128_S32B1B1S32_TN_ANDPOPC" => "llvm.nvvm.mma.and.popc.m16n8k128.row.col.b1" "MMAOP_16x8x256_S32B1B1S32_TN_XORPOPC" => "llvm.nvvm.mma.xor.popc.m16n8k256.row.col.b1" "MMAOP_16x8x256_S32B1B1S32_TN_ANDPOPC" => "llvm.nvvm.mma.and.popc.m16n8k256.row.col.b1" ``` You can instantiate any of these `AbstractMMAOP`s and inspect the information about the operation ```julia julia> op = MMAOP_16x8x8_F32TF32TF32F32_TN() MMAOP_16x8x8_F32TF32TF32F32_TN() julia> op.ARegisters # Register type, and number of registers MoYe.Registers{UInt32, 4} julia> op.BRegisters MoYe.Registers{UInt32, 2} julia> op.CRegisters MoYe.Registers{Float32, 4} ``` !!! note Do not use `mma` with `wmma.load` together. Their data layouts do not agree. The correct execution chain is ldmatrix + mma. """ function fma! 
end const geom_to_shape = Dict( "m8n8k4" => static((8, 8, 4)), "m16n8k4" => static((16, 8, 4)), "m16n8k8" => static((16, 8, 8)), "m16n8k16" => static((16, 8, 16)), "m16n8k32" => static((16, 8, 32)), "m8n8k128" => static((8, 8, 128)), "m16n8k128" => static((16, 8, 128)), "m16n8k256" => static((16, 8, 256)), "m8n8k32" => static((8, 8, 32)), "m16n8k32" => static((16, 8, 32)), "m16n8k64" => static((16, 8, 64)), "m8n8k16" => static((8, 8, 16)), "m16n8k16" => static((16, 8, 16)), "m16n8k32" => static((16, 8, 32)), ) # PTX types to LLVM types for registers const ptx_to_llvm_reg = Dict( "f16" => "<2 x half>", "f32" => "float", "f64" => "double", "s32" => "i32", "b16" => "i32", "s8" => "i32", "u8" => "i32", "s4" => "i32", "u4" => "i32", "b1" => "i32", "bf16" => "i32", "tf32" => "i32", ) # PTX types to julia types for registers const ptx_to_jl_reg = Dict( "f16" => NTuple{2, VecElement{Float16}}, "f32" => Float32, "f64" => Float64, "s32" => UInt32, "b16" => UInt32, "s8" => UInt32, "u8" => UInt32, "s4" => UInt32, "u4" => UInt32, "b1" => UInt32, "bf16" => UInt32, "tf32" => UInt32 ) const ptx_to_jl = Dict( "f16" => Float16, "f32" => Float32, "f64" => Float64, "s32" => Int32, "s8" => Int8, "u8" => UInt8, "bf16" => BFloat16, "tf32" => Float32, "b1" => Bool, ) # geom to num of registers const nregs = Dict( # u8/s8 -> s32 @ m16n16k16/m8n32k16/m32n8k16 "m16n16k16:a:u8" => 2, "m16n16k16:a:s8" => 2, "m16n16k16:b:u8" => 2, "m16n16k16:b:s8" => 2, "m16n16k16:c:s32" => 8, "m16n16k16:d:s32" => 8, "m8n32k16:a:u8" => 1, "m8n32k16:a:s8" => 1, "m8n32k16:b:u8" => 4, "m8n32k16:b:s8" => 4, "m8n32k16:c:s32"=>8, "m8n32k16:d:s32"=>8, "m32n8k16:a:u8"=>4, "m32n8k16:a:s8"=>4, "m32n8k16:b:u8"=>1, "m32n8k16:b:s8"=>1, "m32n8k16:c:s32"=>8, "m32n8k16:d:s32"=>8, "m8n8k16:a:u8"=>1, "m8n8k16:a:s8"=>1, "m8n8k16:b:u8"=>1, "m8n8k16:b:s8"=>1, "m8n8k16:c:s32"=>2, "m8n8k16:d:s32"=>2, "m16n8k16:a:u8"=>2, "m16n8k16:a:s8"=>2, "m16n8k16:b:u8"=>1, "m16n8k16:b:s8"=>1, "m16n8k16:c:s32"=>4, "m16n8k16:d:s32"=>4, "m16n8k32:a:u8"=>4, "m16n8k32:a:s8"=>4, "m16n8k32:b:u8"=>2, "m16n8k32:b:s8"=>2, "m16n8k32:c:s32"=>4, "m16n8k32:d:s32"=>4, # u4/s4 -> s32 @ m8n8k32 (u4/s4) "m8n8k32:a:u4"=>1, "m8n8k32:a:s4"=>1, "m8n8k32:b:u4"=>1, "m8n8k32:b:s4"=>1, "m8n8k32:c:s32"=>2, "m8n8k32:d:s32"=>2, "m16n8k32:a:u4"=>2, "m16n8k32:a:s4"=>2, "m16n8k32:b:u4"=>1, "m16n8k32:b:s4"=>1, "m16n8k32:c:s32"=>4, "m16n8k32:d:s32"=>4, "m16n8k64:a:u4"=>4, "m16n8k64:a:s4"=>4, "m16n8k64:b:u4"=>2, "m16n8k64:b:s4"=>2, "m16n8k64:c:s32"=>4, "m16n8k64:d:s32"=>4, # b1 -> s32 @ m8n8k128(b1) "m8n8k128:a:b1"=>1, "m8n8k128:b:b1"=>1, "m8n8k128:c:s32"=>2, "m8n8k128:d:s32"=>2, "m16n8k128:a:b1"=>2, "m16n8k128:b:b1"=>1, "m16n8k128:c:s32"=>4, "m16n8k128:d:s32"=>4, "m16n8k256:a:b1"=>4, "m16n8k256:b:b1"=>2, "m16n8k256:c:s32"=>4, "m16n8k256:d:s32"=>4, # bf16 -> s32 @ m16n16k16/m8n32k16/m32n8k16 "m16n16k16:a:bf16"=>4, "m16n16k16:b:bf16"=>4, "m8n32k16:a:bf16"=>2, "m8n32k16:b:bf16"=>8, "m32n8k16:a:bf16"=>8, "m32n8k16:b:bf16"=>2, "m16n8k16:a:bf16"=>4, "m16n8k16:b:bf16"=>2, "m16n8k16:c:f32"=>4, "m16n8k16:d:f32"=>4, "m16n8k8:a:bf16"=>2, "m16n8k8:b:bf16"=>1, "m16n8k8:c:f32"=>4, "m16n8k8:d:f32"=>4, "m8n8k4:a:f64"=>1, "m8n8k4:b:f64"=>1, "m8n8k4:c:f64"=>2, "m8n8k4:d:f64"=>2, # tf32 -> s32 @ m16n16k8 "m16n16k8:a:tf32"=>4, "m16n16k8:b:tf32"=>4, "m16n8k4:a:tf32"=>2, "m16n8k4:b:tf32"=>1, "m16n8k4:c:f32"=>4, "m16n8k4:d:f32"=>4, "m16n8k8:a:tf32"=>4, "m16n8k8:b:tf32"=>2, "m16n8k8:c:f32"=>4, "m16n8k8:d:f32"=>4, "m8n8k4:a:f16"=>2, "m8n8k4:b:f16"=>2, "m16n8k8:a:f16"=>2, "m16n8k8:b:f16"=>1, "m16n8k8:c:f16"=>2, "m16n8k8:d:f16"=>2, 
"m16n8k8:c:f32"=>4, "m16n8k8:d:f32"=>4, "m16n8k16:a:f16"=>4, "m16n8k16:b:f16"=>2, "m16n8k16:c:f16"=>2, "m16n8k16:d:f16"=>2, "m16n8k16:c:f32"=>4, "m16n8k16:d:f32"=>4, # ldmatrix "m8n8:x1:b16"=>1, "m8n8:x2:b16"=>2, "m8n8:x4:b16"=>4, ) # all other combinations have the smae size const other_nregs = Dict( "a:f16" => 8, "b:f16" => 8, "c:f16" => 4, "d:f16" => 4, "c:f32" => 8, "d:f32" => 8, ) function get_mmp_op_signature(alayout, blayout, satf, b1op) alayout = alayout == "row" ? "T" : "N" blayout = blayout == "row" ? "T" : "N" satf = isempty(satf) ? "" : "_SATURATE" if isempty(b1op) blop = "" elseif b1op == ".xor.popc" blop = "_XORPOPC" else blop = "_ANDPOPC" end return "$alayout$blayout$satf$blop" end function get_nregs(geom, frag, ptx_elt_type) return get(nregs, "$geom:$frag:$ptx_elt_type", get(other_nregs, "$frag:$ptx_elt_type", nothing)) end # LLVMStruct lowers to {llvmT, llvmT, ...}, which matches the return type of the intrinsic for N in unique(vcat(unique(values(nregs)), unique(values(other_nregs)))) struct_ty = Symbol("LLVMStruct$N") @eval struct $struct_ty{T} Base.Cartesian.@nexprs $N i -> x_i::T end #@eval Base.convert(::Type{NTuple{$N, T}}, x::$struct_ty{T}) where {T} = ntuple(i -> getfield(x, i), $N) end function get_b1_ops(ptx_type) ptx_type != "b1" && return [""] return [".xor.popc", ".and.popc"] end function make_frag(geom, frag, ptx_elt_type) T, S = ptx_to_jl_reg[ptx_elt_type], get_nregs(geom, frag, ptx_elt_type) return Registers{T, S} end function mma_intrinsic_signature(ptx_type_d, ptx_type_a, ptx_type_b, ptx_type_c,) if ptx_type_a == "f16" return "$ptx_type_d.$ptx_type_c" # FP16 ops identified by accumulator & result type. elseif ptx_type_a != ptx_type_b return "$ptx_type_a.$ptx_type_b" # other ops are identified by input types. else return ptx_type_a # if input types are the same, it only appears once. 
end end function convert_geom(input_string::String) regex = r"(\d+)" matches = collect(eachmatch(regex, input_string)) result = join((m.match for m in matches), "x") return result end function get_ccall_args(ARegisters, BRegisters, CRegisters, DRegisters) a_frag_ty = eltype(ARegisters) b_frag_ty = eltype(BRegisters) c_frag_ty = eltype(CRegisters) d_frag_ty = eltype(DRegisters) a_sz = length(ARegisters) b_sz = length(BRegisters) c_sz = length(CRegisters) d_sz = length(DRegisters) a_types = ntuple(i -> a_frag_ty, a_sz) b_types = ntuple(i -> b_frag_ty, b_sz) c_types = ntuple(i -> c_frag_ty, c_sz) d_types = @eval $(Symbol(:LLVMStruct,d_sz)){$d_frag_ty} a_vars = ntuple(i -> :(a[$i]), a_sz) b_vars = ntuple(i -> :(b[$i]), b_sz) c_vars = ntuple(i -> :(c[$i]), c_sz) return a_types, b_types, c_types, d_types, a_vars, b_vars, c_vars, d_frag_ty, d_sz end function is_mma_variant_supported(type_a, type_b, type_c, type_d, geom, layout_a, layout_b, satf) if !(isempty(satf) || (type_a in ["s8", "u8", "s4", "u4"])) return false end if geom == "m8n8k4" && (type_c == "f32") && (type_d != "f32") return false end if geom == "m16n8k8" && (type_a != type_b || type_c != type_d) return false end if geom == "m16n8k16" && (type_c != type_d) return false end if !((geom == "m8n8k4") && type_a == "f16") return layout_a == "row" && layout_b == "col" end return true end function make_mma_ops(geoms, types_a, types_b, types_c, types_d) struct_names = [] for (geom, type_a, type_c) in Iterators.product(geoms, types_a, types_c) for (type_b, type_d) in Iterators.product(ifelse(isempty(types_b), [type_a], types_b), ifelse(isempty(types_d), [type_c], types_d)) for (alayout, blayout, satf) in Iterators.product(["row", "col"], ["row", "col"], ["", ".satfinite"]) if !is_mma_variant_supported(type_a, type_b, type_c, type_d, geom, alayout, blayout, satf) continue end for b1op in get_b1_ops(type_a) # Step 1: Construct the MMA_OP struct struct_name = "MMAOP_$(convert_geom(geom))_$(uppercase(type_d*type_a*type_b*type_c))_$(get_mmp_op_signature(alayout, blayout, satf, b1op))" DRegisters = make_frag(geom, "d", type_d) ARegisters = make_frag(geom, "a", type_a) BRegisters = make_frag(geom, "b", type_b) CRegisters = make_frag(geom, "c", type_c) _struct_name = Symbol(struct_name) @eval struct $_struct_name <: AbstractMMAOP{$DRegisters, $ARegisters, $BRegisters, $CRegisters} end @eval export $_struct_name # Step 2: Construct the intrinsic intrinsic_signature = mma_intrinsic_signature(type_d, type_a, type_b, type_c) mma_intrinsic = "llvm.nvvm.mma$b1op.$geom.$alayout.$blayout$satf.$intrinsic_signature" push!(struct_names, struct_name => mma_intrinsic) a_types, b_types, c_types, d_types, a_vars, b_vars, c_vars, d_frag_ty, d_sz = get_ccall_args(ARegisters(), BRegisters(), CRegisters(), DRegisters()) # Step 3: define fma! 
if d_sz == 1 @eval @inline function (::$_struct_name)(a, b, c) return ccall($mma_intrinsic, llvmcall, $d_frag_ty, ($(a_types...), $(b_types...), $(c_types...)), $(a_vars...), $(b_vars...), $(c_vars...)) end @eval @inline function fma!(op::$_struct_name, d, a, b, c) val = op(a,b,c) return unsafe_store!(pointer(d), val, 1) end else @eval @inline function (::$_struct_name)(a, b, c) return ccall($mma_intrinsic, llvmcall, $d_types, ($(a_types...), $(b_types...), $(c_types...)), $(a_vars...), $(b_vars...), $(c_vars...)) end @eval @inline @inbounds function fma!(op::$_struct_name, d::MoYeArray, a::MoYeArray, b::MoYeArray, c::MoYeArray) val = op(a,b,c) ptr = pointer(d) Base.Cartesian.@nexprs $d_sz i -> d[i] = getfield(val, i) return d end end # Step 4: record the information about the operation @eval @inline valtype_d(::$_struct_name) = $(ptx_to_jl[type_d]) @eval @inline valtype_a(::$_struct_name) = $(ptx_to_jl[type_a]) @eval @inline valtype_b(::$_struct_name) = $(ptx_to_jl[type_b]) @eval @inline valtype_c(::$_struct_name) = $(ptx_to_jl[type_c]) @eval @inline shape_mnk(mma_op::$_struct_name) = $(geom_to_shape[geom]) @eval @inline alignment_a(::$_struct_name) = $(ifelse(alayout == "row", :(:row), :(:col))) @eval @inline alignment_b(::$_struct_name) = $(ifelse(blayout == "row", :(:row), :(:col))) end end end end return struct_names end function get_mma_ops() vcat( make_mma_ops(["m8n8k4"], ["f64"], [], ["f64"], []), make_mma_ops(["m16n8k4", "m16n8k8"], ["tf32"], [], ["f32"], []), make_mma_ops(["m16n8k16", "m16n8k8"], ["bf16"], [], ["f32"], []), make_mma_ops(["m8n8k4", "m16n8k8", "m16n8k16"], ["f16"], [],[ "f16", "f32"], ["f16", "f32"]), make_mma_ops(["m8n8k16", "m16n8k16", "m16n8k32"], ["s8", "u8"], ["s8", "u8"], ["s32"], []), #make_mma_ops(["m8n8k32", "m16n8k32", "m16n8k64"], ["s4", "u4"], ["s4", "u4"], ["s32"], []), # Julia does not support s4/u4 make_mma_ops(["m8n8k128", "m16n8k128", "m16n8k256"], ["b1"], [], ["s32"], []), # 6 ) end const mma_ops_list = get_mma_ops()
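# Sketch: the generated ops expose their geometry and register/value types through
# the accessors defined in step 4 above; the names in the commented call are assumed.
op = MMAOP_16x8x16_F32F16F16F32_TN()
shape_mnk(op)                    # static (16, 8, 16)
valtype_a(op), valtype_c(op)     # (Float16, Float32)
op.ARegisters, op.CRegisters     # Registers{UInt32, 4}, Registers{Float32, 4}
# fma!(op, d, a, b, c)           # device-only; d, a, b, c are per-thread register fragments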
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
2414
abstract type PTXOperation end @inline apply(op::PTXOperation, args...) = op(args...) abstract type AbstractMMAOP{DRegisters, ARegisters, BRegisters, CRegisters} <: PTXOperation end @inline Adapt.adapt(to, x::AbstractMMAOP) = x @inline fma!(mmaop::AbstractMMAOP, a, b, c) = fma!(mmaop, c, a, b, c) """ Registers{T,S} A struct that wraps the register file type `T` and number of register files `S`. """ struct Registers{T,S} end @inline Base.eltype(::Registers{T}) where {T} = T @inline Base.length(::Registers{T, L}) where {T, L} = L @inline Base.eltype(::Type{<:Registers{T}}) where {T} = T @inline Base.length(::Type{Registers{T, L}}) where {T, L} = L function Base.getproperty(obj::AbstractMMAOP{DRegisters, ARegisters, BRegisters, CRegisters}, sym::Symbol) where {DRegisters, ARegisters, BRegisters, CRegisters} if sym === :DRegisters return DRegisters elseif sym === :ARegisters return ARegisters elseif sym === :BRegisters return BRegisters elseif sym === :CRegisters return CRegisters else return getfield(obj,sym) end end function Base.propertynames(::AbstractMMAOP) return (:DRegisters, :ARegisters, :BRegisters, :CRegisters) end @inline regtype_d(mma_op::AbstractMMAOP) = eltype(mma_op.DRegisters) @inline regtype_a(mma_op::AbstractMMAOP) = eltype(mma_op.ARegisters) @inline regtype_b(mma_op::AbstractMMAOP) = eltype(mma_op.BRegisters) @inline regtype_c(mma_op::AbstractMMAOP) = eltype(mma_op.CRegisters) @inline regnum_d(mma_op::AbstractMMAOP) = length(mma_op.DRegisters) @inline regnum_a(mma_op::AbstractMMAOP) = length(mma_op.ARegisters) @inline regnum_b(mma_op::AbstractMMAOP) = length(mma_op.BRegisters) @inline regnum_c(mma_op::AbstractMMAOP) = length(mma_op.CRegisters) struct UniversalFMA{D,A,B,C} <: AbstractMMAOP{Registers{D, 1}, Registers{A, 1}, Registers{B, 1}, Registers{C, 1}} end UniversalFMA{A,B,C}() where {A,B,C} = UniversalFMA{C,A,B,C}() @inline valtype_a(::UniversalFMA{D,A,B,C}) where {D,A,B,C} = A @inline valtype_b(::UniversalFMA{D,A,B,C}) where {D,A,B,C} = B @inline valtype_c(::UniversalFMA{D,A,B,C}) where {D,A,B,C} = C @inline valtype_d(::UniversalFMA{D,A,B,C}) where {D,A,B,C} = D @inline fma!(::UniversalFMA, d, a, b, c) = @inbounds d .= a .* b .+ c
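# Sketch (illustrative host data): UniversalFMA is the scalar fallback and simply
# broadcasts the multiply-add, so ordinary arrays work.
op = UniversalFMA{Float32, Float32, Float32}()   # expands to UniversalFMA{Float32, Float32, Float32, Float32}
d = zeros(Float32, 1); a = Float32[2]; b = Float32[3]; c = Float32[4]
fma!(op, d, a, b, c)             # d .= a .* b .+ c, so d == [10.0f0]
op.ARegisters                    # Registers{Float32, 1}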
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
14092
abstract type AbstractCopyAtom{Traits, T, OP} <: AbstractCopyTraits{OP} end struct CopyAtom{Traits, T, OP, VS, VD, VR, NS, ND} <: AbstractCopyAtom{Traits, T, OP} traits::Traits val_layout_src::VS val_layout_dst::VD val_layout_ref::VR num_val_src::NS num_val_dst::ND end function Base.getproperty(atom::CopyAtom, x::Symbol) if x === :bit_layout_src return atom.traits.srclayout elseif x === :bit_layout_dst return atom.traits.dstlayout elseif x === :bit_layout_ref return atom.traits.reflayout elseif x === :threadid return atom.traits.threadid else return getfield(atom, x) end end function Base.propertynames(::CopyAtom) return (:traits, :bit_layout_src, :bit_layout_dst, :bit_layout_ref, :val_layout_src, :val_layout_dst, :val_layout_ref, :num_val_src, :num_val_dst) end function CopyAtom{Traits, T}() where {OP, Traits <: CopyTraits{OP}, T} @inline traits = Traits() threadid = traits.threadid val_layout_src = upcast(traits.srclayout, sizeof_bits(T)) val_layout_dst = upcast(traits.dstlayout, sizeof_bits(T)) val_layout_ref = upcast(traits.reflayout, sizeof_bits(T)) @assert size(val_layout_src, 1) == size(threadid) @assert size(val_layout_dst, 1) == size(threadid) @assert size(val_layout_ref, 1) == size(threadid) num_val_src = size(val_layout_src, 2) num_val_dst = size(val_layout_dst, 2) return CopyAtom{typeof(traits), T, OP, typeof(val_layout_src), typeof(val_layout_dst), typeof(val_layout_ref), typeof(num_val_src), typeof(num_val_dst)}(traits, val_layout_src, val_layout_dst, val_layout_ref, num_val_src, num_val_dst) end function CopyAtom{CPOP, T}() where {CPOP <: AbstractCopyOp, T} @inline op = CPOP() return CopyAtom{CopyTraits{typeof(op)}, T}() end @inline function num_val_src(::Type{CopyAtom{Traits, T, OP, VS, VD, VR, NS, ND}}) where { Traits, T, OP, VS, VD, VR, NS, ND} return NS end @inline function num_val_dst(::Type{CopyAtom{Traits, T, OP, VS, VD, VR, NS, ND}}) where { Traits, T, OP, VS, VD, VR, NS, ND} return ND end @inline get_traits(atom::CopyAtom) = atom.traits struct TiledCopy{Traits, T, OP, CP, LT, ST} <: AbstractCopyAtom{Traits, T, OP} copy_atom::CP tiled_layout_TV::LT tiler_MN::ST end @inline atom_threadid(tiled_copy::TiledCopy) = tiled_copy.copy_atom.threadid @inline atom_layout_src(tiled_copy::TiledCopy) = tiled_copy.copy_atom.val_layout_src @inline atom_layout_dst(tiled_copy::TiledCopy) = tiled_copy.copy_atom.val_layout_dst @inline atom_layout_ref(tiled_copy::TiledCopy) = tiled_copy.copy_atom.val_layout_ref function num_val_src(::Type{<:TiledCopy{Traits, T, OP, CP}}) where {Traits, T, OP, VS, VD, VR, NS, ND, CP <: CopyAtom{Traits, T, OP, VS, VD, VR, NS, ND}} @inline return NS end function num_val_dst(::Type{<:TiledCopy{Traits, T, OP, CP}}) where {Traits, T, OP, VS, VD, VR, NS, ND, CP <: CopyAtom{Traits, T, OP, VS, VD, VR, NS, ND}} @inline return ND end @inline get_traits(atom::TiledCopy) = get_traits(atom.copy_atom) function TiledCopy(atom::CopyAtom{Traits, T, OP}, tiled_layout_TV::Layout, tiler_MN) where {Traits, T, OP} @assert size(tiled_layout_TV, 1) % size(atom.val_layout_ref, 1) == Zero() @assert size(tiled_layout_TV, 2) % size(atom.val_layout_ref, 2) == Zero() return TiledCopy{Traits, T, OP, typeof(atom), typeof(tiled_layout_TV), typeof(tiler_MN) }(atom, tiled_layout_TV, tiler_MN) end function tile2thrfrg(tiled_copy::TiledCopy, x::Layout, ref2trg) atom_num_thr = size(atom_layout_ref(tiled_copy), 1) atom_num_val = size(atom_layout_ref(tiled_copy), 2) atom_layout_TV = zipped_divide(tiled_copy.tiled_layout_TV, (atom_num_thr, atom_num_val)) trg_layout_TV = 
composition(atom_layout_TV, (ref2trg, :)) thrval2mn = coalesce(_zip(trg_layout_TV), (One(), (One(), One()))) tv_array = composition(x, (thrval2mn, :)) return tv_array((:, :), :) end function tidfrg_S(tiled_copy::TiledCopy, src::Layout{N}) where {N} @assert N>=rank(shape(tiled_copy.tiler_MN)) "The dimension is too small to be tiled." return tile2thrfrg(tiled_copy, zipped_divide(src, tiled_copy.tiler_MN), composition(right_inverse(atom_layout_ref(tiled_copy)), atom_layout_src(tiled_copy))) end function tidfrg_D(tiled_copy::TiledCopy, dst::Layout{N}) where {N} @assert N>=rank(shape(tiled_copy.tiler_MN)) "The dimension is too small to be tiled." return tile2thrfrg(tiled_copy, zipped_divide(dst, tiled_copy.tiler_MN), composition(right_inverse(atom_layout_ref(tiled_copy)), atom_layout_dst(tiled_copy))) end function retile(tiled_copy, x::StaticLayout{R}) where {R} V = size(x, _1) tiled_layout_TV = tiled_copy.tiled_layout_TV tiled_shape_MN = shape(tiled_copy.tiler_MN) atom_num_val = size(atom_layout_ref(tiled_copy), _2) tiled_num_thr = size(tiled_layout_TV, _1) frg_layout_mn = upcast(composition(right_inverse(tiled_layout_TV), make_layout(tiled_shape_MN)), tiled_num_thr * V) frg_layout_v = zipped_divide(logical_product(make_layout(V), right_inverse(frg_layout_mn)), make_layout(atom_num_val)) t_array = zipped_divide(x, prepend(product_each(shape(frg_layout_mn)), V)) v_array = composition(t_array, (frg_layout_v, :)) return v_array(:, append(One(), :, StaticInt{R}())) end function get_layoutS_TV(tiled_copy::TiledCopy) ref_S = make_layout((shape(tiled_copy.tiler_MN), One())) return tile2thrfrg(tiled_copy, ref_S, composition(right_inverse(atom_layout_ref(tiled_copy)), atom_layout_src(tiled_copy)))(:, :, One()) end function get_layoutS_MN(tiled_copy::TiledCopy) tiled_shape_MN = shape(tiled_copy.tiler_MN) layoutS_TV = get_layoutS_TV(tiled_copy) layoutS_MK = composition(right_inverse(layoutS_TV), make_layout(tiled_shape_MN)) thrID_S = make_layout(size(tiled_copy.tiled_layout_TV, 1)) return (layoutS_MK, thrID_S) end function get_layoutD_TV(tiled_copy::TiledCopy) ref_D = make_layout((shape(tiled_copy.tiler_MN), One())) return tile2thrfrg(tiled_copy, ref_D, composition(right_inverse(atom_layout_ref(tiled_copy)), atom_layout_dst(tiled_copy)))(:, :, One()) end function get_layoutD_MN(tiled_copy::TiledCopy) tiled_shape_MN = shape(tiled_copy.tiler_MN) layoutD_TV = get_layoutD_TV(tiled_copy) layoutD_MK = composition(right_inverse(layoutD_TV), make_layout(tiled_shape_MN)) thrID_D = make_layout(size(tiled_copy.tiled_layout_TV, 1)) return (layoutD_MK, thrID_D) end struct ThrCopy{Traits, T, OP, TC, TI} <: AbstractCopyAtom{Traits, T, OP} tiled_copy::TC thr_idx::TI function ThrCopy(tiled_copy::TiledCopy{Traits, T, OP}, thr_idx) where {Traits, T, OP} return new{Traits, T, OP, typeof(tiled_copy), typeof(thr_idx)}(tiled_copy, thr_idx) end end function partition_S(thr_copy::ThrCopy, s::MoYeArray{T, N}) where {T, N} thr_tensor = MoYeArray(pointer(s), tidfrg_S(thr_copy.tiled_copy, layout(s))) return view(thr_tensor, thr_copy.thr_idx, :, repeat(:, StaticInt{N}())) end function partition_D(thr_copy::ThrCopy, d::MoYeArray{T, N}) where {T, N} thr_tensor = MoYeArray(pointer(d), tidfrg_D(thr_copy.tiled_copy, layout(d))) return view(thr_tensor, thr_copy.thr_idx, :, repeat(:, StaticInt{N}())) end function retile_S(thr_copy::ThrCopy, s::StaticMoYeArray{T, R}) where {T, R} return MoYeArray(pointer(s), retile(thr_copy.tiled_copy, layout(s))) end function retile_D(thr_copy::ThrCopy, d::StaticMoYeArray{T, R}) where {T, R} return 
MoYeArray(pointer(d), retile(thr_copy.tiled_copy, layout(d))) end @inline get_slice(tiled_copy::TiledCopy, thr_idx::DInt) = ThrCopy(tiled_copy, thr_idx) @inline get_thread_slice(tiled_copy::TiledCopy, thr_idx::DInt) = get_slice(tiled_copy, thr_idx) function make_tiled_copy_A(copy_atom::AbstractCopyAtom, tiled_mma::TiledMMA) M, K = tile_size(tiled_mma, _1), tile_size(tiled_mma, _3) return TiledCopy(copy_atom, get_layoutA_TV(tiled_mma), (M, K)) end function make_tiled_copy_B(copy_atom::AbstractCopyAtom, tiled_mma::TiledMMA) N, K = tile_size(tiled_mma, _2), tile_size(tiled_mma, _3) return TiledCopy(copy_atom, get_layoutB_TV(tiled_mma), (N, K)) end function make_tiled_copy_C(copy_atom::AbstractCopyAtom, tiled_mma::TiledMMA) M, N = tile_size(tiled_mma, _1), tile_size(tiled_mma, _2) return TiledCopy(copy_atom, get_layoutC_TV(tiled_mma), (M, N)) end """ make_tiled_copy(copy_atom::CopyAtom, thr_layout::Layout, val_layout::Layout) Make a tiled copy atom from a copy atom. """ function make_tiled_copy(copy_atom::CopyAtom, thr_layout::Layout{TR}, val_layout::Layout{TV}=@Layout(1)) where {TR, TV} # (M, N) -> (thr_idx, val_idx) layout_mn = raked_product(thr_layout, val_layout, true) # (thr_idx, val_idx) -> (M, N) layout_tv = composition(right_inverse(layout_mn), make_layout((size(thr_layout), size(val_layout)))) tiler = product_each(shape(layout_mn)) return TiledCopy(copy_atom, layout_tv, tiler) end function tile_size(tiled_copy::TiledCopy) return shape(tiled_copy.tiler_MN) end function Base.size(tiled_copy::TiledCopy) return size(tiled_copy.tiled_layout_TV, 1) end function Base.show(io::IO, m::CopyAtom{Traits, T}) where {Traits, T} println(io, "CopyAtom") println(io, " Thread ID: ", m.traits.threadid) println(io, " ValLayoutSrc: ", m.val_layout_src) println(io, " ValLayoutDst: ", m.val_layout_dst) println(io, " ValLayoutRef: ", m.val_layout_ref) return println(io, " ValueType: $(Int(sizeof_bits(T)))b") end function Base.show(io::IO, m::TiledCopy) println(io, "TiledCopy") println(io, " Tiler_MN: ", m.tiler_MN) println(io, " TiledLayout_TV: ", m.tiled_layout_TV) return show(io, m.copy_atom) end function Base.show(io::IO, m::ThrCopy) println(io, "ThrCopy") println(io, " ThrIdx: ", m.thr_idx) return show(io, m.tiled_copy) end Base.@assume_effects :foldable @generated function apply(copy_atom::AbstractCopyAtom, dst::MoYeArray{TS, 1}, src::MoYeArray{TD,1}) where {TS, TD} if shape(src) <: Tuple && shape(dst) <: Tuple return quote Base.@_inline_meta dst_v = MoYeArray(pointer(dst), dst.layout[_1]) src_v = MoYeArray(pointer(src), src.layout[_1]) _copyto!(copy_atom, dst_v, src_v, TrivialPred()) end else @assert num_val_src(copy_atom) == size(layout(src)) "Expected $(num_val_src(copy_atom)) but got $(size(layout(src)))" @assert num_val_dst(copy_atom) == size(layout(dst)) "Expected $(num_val_dst(copy_atom)) but got $(size(layout(dst)))" return quote Base.@_inline_meta copyto_unpack!(get_traits(copy_atom), dst, src) end end end
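# Host-constructible sketch (sizes are illustrative): wrap the universal copy op in a
# CopyAtom and tile it over a 32x8 thread layout with one value per thread; the actual
# partitioning (get_slice / partition_S / partition_D) happens inside a kernel.
atom = CopyAtom{UniversalCopy{Float32}, Float32}()
tiled = make_tiled_copy(atom, @Layout((32, 8)), @Layout((1, 1)))
tile_size(tiled)                 # the (M, N) tile covered by one application
size(tiled)                      # number of participating threads, here 256
# thr_copy = get_slice(tiled, threadIdx().x)   # device-only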
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
346
function Base.copyto!(copy_atom::CopyAtom{Traits, T, OP}, dst::StaticOwningArray, src::SharedArray) where {Traits, T, OP <: AbstractLdMatrix} @inline buffer = ManualMemory.preserve_buffer(dst) GC.@preserve buffer begin copyto!(copy_atom, StrideArraysCore.maybe_ptr_array(dst), src) end return dst end
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
14200
abstract type AbstractMMAAtom{Traits} end # Atom interface @inline valtype_a(::AbstractMMAAtom{Traits}) where {Traits} = valtype_a(Traits()) @inline valtype_b(::AbstractMMAAtom{Traits}) where {Traits} = valtype_b(Traits()) @inline valtype_c(::AbstractMMAAtom{Traits}) where {Traits} = valtype_c(Traits()) @inline valtype_d(::AbstractMMAAtom{Traits}) where {Traits} = valtype_d(Traits()) @inline frgtype_a(::AbstractMMAAtom{Traits}) where {Traits} = valtype_a(Traits()) @inline frgtype_b(::AbstractMMAAtom{Traits}) where {Traits} = valtype_b(Traits()) @inline frgtype_c(::AbstractMMAAtom{Traits}) where {Traits} = valtype_c(Traits()) @inline frgtype_d(::AbstractMMAAtom{Traits}) where {Traits} = valtype_d(Traits()) #@inline regnum_a(::AbstractMMAAtom{OP}) where {OP} = regnum_a(OP()) #@inline regnum_b(::AbstractMMAAtom{OP}) where {OP} = regnum_b(OP()) #@inline regnum_c(::AbstractMMAAtom{OP}) where {OP} = regnum_c(OP()) #@inline regnum_d(::AbstractMMAAtom{OP}) where {OP} = regnum_d(OP()) @inline layout_a(::AbstractMMAAtom{Traits}) where {Traits} = layout_a(Traits()) @inline layout_b(::AbstractMMAAtom{Traits}) where {Traits} = layout_b(Traits()) @inline layout_c(::AbstractMMAAtom{Traits}) where {Traits} = layout_c(Traits()) @inline thr_id(::AbstractMMAAtom{Traits}) where {Traits} = thr_id(Traits()) @inline shape_mnk(::AbstractMMAAtom{Traits}) where {Traits} = shape_mnk(Traits()) function make_fragment_C(m::AbstractMMAAtom, C::MoYeArray{T, N}) where {T, N} @inline @assert N ≥ 3 @assert size(layout(C), 1) == size(layout_c(m), 2) return MoYeArray{frgtype_c(m)}(undef, shape(C)) # (V, M, N) end # Note that hopper architecture needs to return a view of A/B for the fragment # In this case we always have frgtype_a(m) == T function make_fragment_A(m::AbstractMMAAtom, A::MoYeArray{T, N}) where {T, N} @inline @assert N ≥ 3 @assert size(layout(A), 1) == size(layout_a(m), 2) return make_fragment_like(frgtype_a(m), A) # (V, M, K) end function make_fragment_B(m::AbstractMMAAtom, B::MoYeArray{T, N}) where {T, N} @inline @assert N ≥ 3 @assert size(layout(B), 1) == size(layout_b(m), 2) return make_fragment_like(frgtype_b(m), B) # (V, N, K) end struct MMAAtom{Traits} <: AbstractMMAAtom{Traits} function MMAAtom{Traits}(args...) where {Traits <: AbstractMMATraits} traits = Traits(args...) return new{typeof(traits)}() end function MMAAtom{OP}(args...) where {OP <: AbstractMMAOP} traits = MMATraits{OP}(args...) 
return new{typeof(traits)}() end end function Base.show(io::IO, m::MMAAtom) println(io, "MMAAtom") println(io, " Shape_MNK: ", shape_mnk(m)) println(io, " Thread ID: ", thr_id(m)) println(io, " Layout_A_TV: ", layout_a(m)) println(io, " Layout_B_TV: ", layout_b(m)) return println(io, " Layout_C_TV: ", layout_c(m)) end function apply(mma_atom::AbstractMMAAtom{Traits}, D::MoYeArray{TD, 1}, A::MoYeArray{TA, 1}, B::MoYeArray{TB, 1}, C::MoYeArray{TC, 1}) where {Traits, TD, TA, TB, TC} @inline return mma_unpack!(Traits(), D, A, B, C) end function apply(mma_atom::AbstractMMAAtom, A::MoYeArray, B::MoYeArray, C::MoYeArray) @inline return apply(mma_atom, C, A, B, C) end # TiledMMA struct TiledMMA{Atom<:AbstractMMAAtom, AtomLayoutMNK <: Layout{3}, PermutationMNK <: Tile{3}, Traits} <: AbstractMMAAtom{Traits} atom::Atom atom_layout_mnk::AtomLayoutMNK permutation_mnk::PermutationMNK function TiledMMA{Atom, AtomLayoutMNK, PermutationMNK, Traits}() where {Atom, AtomLayoutMNK, PermutationMNK, Traits} return new{Atom, AtomLayoutMNK, PermutationMNK, Traits}(Atom(), AtomLayoutMNK(), make_tuple(PermutationMNK)) end function TiledMMA{Atom, AtomLayoutMNK, PermutationMNK}() where {Traits,Atom<:AbstractMMAAtom{Traits}, AtomLayoutMNK, PermutationMNK} return new{Atom, AtomLayoutMNK, PermutationMNK, Traits}(Atom(), AtomLayoutMNK(), make_tuple(PermutationMNK)) end end function Base.show(io::IO, m::TiledMMA) println(io, "TiledMMA") println(io, " ThrLayoutVMNK: ", get_thr_layout_vmnk(m)) println(io, " PermutationMNK: ", m.permutation_mnk) return show(io, m.atom) end struct ThrMMA{TA<:TiledMMA, ThrVMNK, Traits} <: AbstractMMAAtom{Traits} tiled_mma::TA thr_vmnk::ThrVMNK function ThrMMA(tiled_mma::TA, thr_vmnk::ThrVMNK) where {Traits,TA<:AbstractMMAAtom{Traits}, ThrVMNK} return new{TA, ThrVMNK, Traits}(tiled_mma, thr_vmnk) end end function Base.show(io::IO, m::ThrMMA{TA, ThrVMNK}) where {TA, ThrVMNK} println(io, "ThrMMA") println(io, " Thr VMNK: ", m.thr_vmnk) return show(io, m.tiled_mma) end @inline @generated function get_thr_layout_vmnk(::TiledMMA{Atom, AtomLayoutMNK}) where {Atom, AtomLayoutMNK} thr_layout_vmnk_ = tiled_product(thr_id(Atom()), AtomLayoutMNK()) # (thr_id, atom_M, atom_N, atom_K) return :($thr_layout_vmnk_) end function thrfrg_C(m::TiledMMA, C::Layout) thr_layout_vmnk = get_thr_layout_vmnk(m) atom_mnk = shape_mnk(m.atom) permutation_mnk = m.permutation_mnk # Assume C > permutation_mn and permutation_mn divides C such logical_divide makes sense # Because of the assumption, the "effective" tiledmma size scales up # If permutation_mnk is a tuple of ints then there is no actual permutation, it simply add nested sublayouts to the layout t_array = logical_divide(C, (permutation_mnk[1], permutation_mnk[2])) a_array = zipped_divide(t_array, map(make_layout, (atom_mnk[1], atom_mnk[2]))) # ((atom_m, atom_n), (rest_m, rest_n)) tv_array = composition(a_array, (layout_c(m), :)) # ((thr_id, val_idx), (rest_m, rest_n)) thr_tile = (:, (make_layout(size(thr_layout_vmnk, _2)), make_layout(size(thr_layout_vmnk, _3)))) # (:, (atom_M, atom_N)) # ((thr_id, val_idx), ((atom_M, atom_N), (rest_m/atom_M, rest_n/atom_N)) -> ((thr_id, (atom_M, atom_N)), (val_idx, (rest_m/atom_M, rest_n/atom_N))) thr_array = zipped_divide(tv_array, thr_tile) return thr_array end function thrfrg_A(m::TiledMMA, A::Layout) thr_layout_vmnk = get_thr_layout_vmnk(m) atom_mnk = shape_mnk(m.atom) permutation_mnk = m.permutation_mnk t_array = logical_divide(A, (permutation_mnk[1], permutation_mnk[3])) a_array = zipped_divide(t_array, map(make_layout, 
map(make_layout, (atom_mnk[1], atom_mnk[3])))) tv_array = composition(a_array, (layout_a(m), :)) thr_tile = (:, (make_layout(size(thr_layout_vmnk, _2)), make_layout(size(thr_layout_vmnk, _4)))) # (:, (thrM, thrK)) # ((thr_id, val_idx), ((M, K), (rest_m/M, rest_k/K)) -> ((thr_id, (thrM, thrK)), (val_idx, (rest_m/thrM, rest_k/thrK))) thr_array = zipped_divide(tv_array, thr_tile) return thr_array end function thrfrg_B(m::TiledMMA, B::Layout) thr_layout_vmnk = get_thr_layout_vmnk(m) atom_mnk = shape_mnk(m.atom) permutation_mnk = m.permutation_mnk t_array = logical_divide(B, (permutation_mnk[2], permutation_mnk[3])) b_array = zipped_divide(t_array, map(make_layout, map(make_layout, (atom_mnk[2], atom_mnk[3])))) tv_array = composition(b_array, (layout_b(m), :)) thr_tile = (:, (make_layout(size(thr_layout_vmnk, _3)), make_layout(size(thr_layout_vmnk, _4)))) # (:, (thrN, thrK)) # ((thr_id, val_idx), ((N, K), (rest_n/N, rest_k/K)) -> ((thr_id, (thrN, thrK)), (val_idx, (rest_n/thrN, rest_k/thrK))) thr_array = zipped_divide(tv_array, thr_tile) return thr_array end function get_slice(m::TiledMMA, thr_idx::DInt) @inline thr_vmnk = get_congr_coord(get_thr_layout_vmnk(m), thr_idx) return ThrMMA(m, thr_vmnk) end @inline Base.size(x::TiledMMA) = size(get_thr_layout_vmnk(x)) @generated function tile_size(m::TiledMMA, ::StaticInt{I}) where {I} @assert I in 1:3 m = m() core_size = shape_mnk(m)[I] * size(get_thr_layout_vmnk(m), I+1) s = m.permutation_mnk[I] perm_size = s isa Layout ? size(s) : s if perm_size isa Colon return :($core_size) else return :($(max(core_size, perm_size))) end end @inline tile_shape(m::TiledMMA) = (tile_size(m, One()), tile_size(m, _2), tile_size(m, _3)) function make_tiled_mma(mma_atom::AbstractMMAAtom, atom_layout::Layout=@Layout((1, 1, 1)), permutations::Tile=(:, :, :)) atom_layout_mnk = append(atom_layout, @Layout(1, 0), _3) permutation_mnk = append(permutations, :, _3) return TiledMMA{typeof(mma_atom), typeof(atom_layout_mnk), typeof(permutation_mnk)}() end """ make_tiled_mma(mma_op, atom_layout, permutations) Create a TiledMMA object from an MMA operation, atom layout, and permutations. See also [`print_typst`](@ref). ## Arguments - `mma_op::OP`: The MMA operation. - `atom_layout::Layout`: The layout of the atom. - `permutations::Tile`: The permutations of the atom. 
## Examples ```julia julia> tiled_mma = make_tiled_mma(MMAOP_8x8x4_F32F16F16F32_NT(), @Layout((2,2), (2,1)), (@Layout((4,4,2), (1,8,4)), _32, _4)) TiledMMA ThrLayoutVMNK: ((_4, _2), _2, _2, _1):((_1, _16), _8, _4, _0) PermutationMNK: ((_4, _4, _2):(_1, _8, _4), _32, _4) MMAAtom Thread ID: (_4, _2):(_1, _16) Layout_A_TV: ((_4, _2), _4):((_8, _4), _1) Layout_B_TV: ((_4, _2), _4):((_8, _4), _1) Layout_C_TV: ((_2, _2, _2), (_2, _2, _2)):((_1, _16, _4), (_8, _2, _32)) ``` """ function make_tiled_mma(mma_op::OP, atom_layout::Layout=@Layout((1, 1, 1)), permutations::Tile=(:, :, :)) where {OP<: AbstractMMAOP} return make_tiled_mma(MMAAtom{OP}(), atom_layout, permutations) end function get_layoutC_MN(tiled_mma::TiledMMA) ref_C = make_layout((tile_size(tiled_mma, One()), tile_size(tiled_mma, _2))) layoutC_TV = thrfrg_C(tiled_mma, ref_C) layoutC_MN = withshape(right_inverse(layoutC_TV), shape(ref_C)) thrid_C = get_thr_layout_vmnk(tiled_mma)(:, :, :, Zero()) return layoutC_MN, thrid_C end function get_layoutA_MK(tiled_mma::TiledMMA) ref_A = make_layout((tile_size(tiled_mma, One()), tile_size(tiled_mma, _3))) layoutA_TV = thrfrg_A(tiled_mma, ref_A) layoutA_MK = withshape(right_inverse(layoutA_TV), shape(ref_A)) thrid_A = get_thr_layout_vmnk(tiled_mma)(:, :, Zero(), :) return layoutA_MK, thrid_A end function get_layoutB_NK(tiled_mma::TiledMMA) ref_B = make_layout((tile_size(tiled_mma, _2), tile_size(tiled_mma, _3))) layoutB_TV = thrfrg_B(tiled_mma, ref_B) layoutB_NK = withshape(right_inverse(layoutB_TV), shape(ref_B)) thrid_B = get_thr_layout_vmnk(tiled_mma)(:, Zero(), :, :) return layoutB_NK, thrid_B end @inline tile_size(m::ThrMMA, i::IntType) = tile_size(m.tiled_mma, i) function get_layoutC_TV(tiled_mma::TiledMMA) ref_C = make_layout((tile_size(tiled_mma, One()), tile_size(tiled_mma, _2))) layoutC_TV = thrfrg_C(tiled_mma, ref_C) thridx_to_thrid = right_inverse(get_thr_layout_vmnk(tiled_mma)) return composition(layoutC_TV, (thridx_to_thrid, :)) end function get_layoutA_TV(tiled_mma::TiledMMA) ref_A = make_layout((tile_size(tiled_mma, One()), tile_size(tiled_mma, _3))) layoutA_TV = thrfrg_A(tiled_mma, ref_A) thr_layout_vmnk = get_thr_layout_vmnk(tiled_mma) # insert N dimension to reflect the projection in A atile = (:, (make_layout((size(thr_layout_vmnk, _2), size(thr_layout_vmnk, _3)), (One(), Zero())), :)) thridx_to_thrid = right_inverse(thr_layout_vmnk) return composition(composition(layoutA_TV, (atile, :)), (thridx_to_thrid, :)) end function get_layoutB_TV(tiled_mma::TiledMMA) ref_B = make_layout((tile_size(tiled_mma, _2), tile_size(tiled_mma, _3))) layoutB_TV = thrfrg_B(tiled_mma, ref_B) thr_layout_vmnk = get_thr_layout_vmnk(tiled_mma) # insert M dimension to reflect the projection in B btile = (:, (make_layout((size(thr_layout_vmnk, _2), size(thr_layout_vmnk, _3)), (Zero(), One())), :)) thridx_to_thrid = right_inverse(thr_layout_vmnk) return composition(composition(layoutB_TV, (btile, :)), (thridx_to_thrid, :)) end function partition_C(m::ThrMMA, C::MoYeArray) thr_array = MoYeArray(pointer(C), thrfrg_C(m.tiled_mma, layout(C))) thr_vmn = (m.thr_vmnk[1], (m.thr_vmnk[2], m.thr_vmnk[3])) # (V, (M, N)) return view(thr_array, thr_vmn, (:, repeat(:, rank(layout(thr_array)[2][2])))) end function partition_A(m::ThrMMA, A::MoYeArray) thr_array = MoYeArray(pointer(A), thrfrg_A(m.tiled_mma, layout(A))) thr_vmk = (m.thr_vmnk[1], (m.thr_vmnk[2], m.thr_vmnk[4])) # (V, (M, K)) return view(thr_array, thr_vmk, (:, repeat(:, rank(layout(thr_array)[2][2])))) end function partition_B(m::ThrMMA, B::MoYeArray) 
thr_array = MoYeArray(pointer(B), thrfrg_B(m.tiled_mma, layout(B))) thr_vnk = (m.thr_vmnk[1], (m.thr_vmnk[3], m.thr_vmnk[4])) # (V, (N, K)) return view(thr_array, thr_vnk, (:, repeat(:, rank(layout(thr_array)[2][2])))) end function partition_fragment_C(m::ThrMMA, C::MoYeArray) @inline return make_fragment_C(m.tiled_mma.atom, partition_C(m, C)) end function partition_fragment_A(m::ThrMMA, A::MoYeArray) @inline return make_fragment_A(m.tiled_mma.atom, partition_A(m, A)) end function partition_fragment_B(m::ThrMMA, B::MoYeArray) @inline return make_fragment_B(m.tiled_mma.atom, partition_B(m, B)) end function partition_shape_C(m::TiledMMA, shape_MN::StaticIntTuple{R}) where {R} @assert R >= 2 atom_mnk = shape_mnk(m) V = shape(layout_c(m))[2] M = shape_div(shape_MN[1], atom_mnk[1]* m.thr_vmnk[2]) N = shape_div(shape_MN[2], atom_mnk[2]* m.thr_vmnk[3]) return (V, M, N, shape_MN[3:R]...) end function partition_fragment_C(m::TiledMMA, shape_MN::StaticIntTuple) @inline return MoYeArray{frgtype_c(m)}(undef, partition_shape_C(m, shape_MN)) end
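# Sketch complementing the make_tiled_mma docstring above (choices are illustrative):
# tile the 16x8x16 f16/f32 op over a 2x2 arrangement of warps and read off the tile shape.
tiled_mma = make_tiled_mma(MMAOP_16x8x16_F32F16F16F32_TN(), @Layout((2, 2)))
tile_shape(tiled_mma)            # static (32, 16, 16): atom MNK scaled by the atom layout
# thr_mma = get_slice(tiled_mma, threadIdx().x)        # device-only
# rC = partition_fragment_C(thr_mma, gC)               # gC assumed: an (M, N) MoYeArray tile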
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
450
## Shared Memory, static allocation but the layout can be dynamic @inline SharedMemory(T, ::StaticInt{L}) where {L} = CUDA.emit_shmem(T, Val(L)) function MoYeSharedArray(::Type{T}, l::StaticLayout) where {T} @inline smem = MoYe.SharedMemory(T, cosize(l)) return MoYeArray(smem, l) end function MoYeSharedArray(::Type{T}, s::StaticIntTuple) where {T} @inline l = make_layout(s) return MoYeSharedArray(T, l) end
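# Device-side sketch (sizes are illustrative): shared-memory allocation is static, so
# the layout or shape must be fully static and the call must appear inside a kernel.
smem_a = MoYeSharedArray(Float16, @Layout((128, 32)))  # 128x32 Float16 tile in shared memory
smem_b = MoYeSharedArray(Float32, static((32, 32)))    # same thing from a static shape tuple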
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
1460
abstract type AbstractCopyTraits{OP} <: AbstractTraits{OP} end struct CopyTraits{C <: AbstractCopyOp, LT, LS, LD, LR} <: AbstractCopyTraits{C} copy_op::C threadid::LT srclayout::LS dstlayout::LD reflayout::LR end function CopyTraits{OP}(threadid, srclayout, dstlayout, reflayout=srclayout) where {OP<:AbstractCopyOp} copy_op = OP() return CopyTraits{typeof(copy_op), typeof(threadid), typeof(srclayout), typeof(dstlayout), typeof(reflayout)}(copy_op, threadid, srclayout, dstlayout, reflayout) end function CopyTraits{UniversalCopy{S,D}}() where {S,D} threadid = make_layout(One()) # 1 thread per operation srclayout = make_layout((One(), static(sizeof(S) * 8))) # thr -> bit dstlayout = make_layout((One(), static(sizeof(D) * 8))) return CopyTraits{UniversalCopy{S,D}}(threadid, srclayout, dstlayout) end function copyto_unpack!(::AbstractCopyTraits{OP}, dst::MoYeArray, src::MoYeArray) where {OP} cpop = OP() registers_src = cpop.SRegisters registers_dst = cpop.DRegisters regtype_src = eltype(registers_src) regtype_dst = eltype(registers_dst) regnum_src = length(registers_src) regnum_dst = length(registers_dst) rs = recast(regtype_src, src) rd = recast(regtype_dst, dst) @assert size(rs.layout) == StaticInt{regnum_src}() @assert size(rd.layout) == StaticInt{regnum_dst}() copyto!(cpop, rd, rs) return dst end
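# Sketch: traits of the single-thread universal copy defined above; the source and
# destination layouts map (thread, value) to bit offsets.
t = CopyTraits{UniversalCopy{Float32, Float32}}()
t.threadid                       # layout with a single thread
t.srclayout                      # one 32-bit value for that thread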
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
1043
function CopyTraits{CPOP_ASYNC_CACHEALWAYS{S,D}}() where {S,D} threadid = make_layout(One()) # 1 thread per operation srclayout = make_layout((One(), static(sizeof(S)*8))) dstlayout = make_layout((One(), static(sizeof(D)*8))) return CopyTraits{CPOP_ASYNC_CACHEALWAYS{S,D}}(threadid, srclayout, dstlayout) end function CopyTraits{CPOP_ASYNC_CACHEGLOBAL{S,D}}() where {S,D} threadid = make_layout(One()) # 1 thread per operation srclayout = make_layout((One(), static(sizeof(S)*8))) dstlayout = make_layout((One(), static(sizeof(D)*8))) return CopyTraits{CPOP_ASYNC_CACHEGLOBAL{S,D}}(threadid, srclayout, dstlayout) end function select_elementwise_copy(src::MoYeArray{TS}, dest::MoYeArray{TD}) where {TS, TD} @static if CP_SYNC_ENABLED if isgmem(src) && issmem(dest) && sizeof(TS) == sizeof(TD) return CPOP_ASYNC_CACHEALWAYS{TS,TD}() else return UniversalCopy{TS,TD}() end else return UniversalCopy{TS,TD}() end end
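# Sketch of how the selector above is typically used; `gA` and `sA` are assumed to be
# global- and shared-memory MoYeArrays of Float32 inside a kernel.
# op = select_elementwise_copy(gA, sA)   # cp.async op on sm_80+ (gmem -> smem, equal element sizes)
# op = select_elementwise_copy(sA, gA)   # falls back to UniversalCopy{Float32, Float32}()
# copyto!(op, dst_frag, src_frag)        # then cp_async_commit() / cp_async_wait() as needed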
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
1611
function CopyTraits{LDSM_U32x1_N}() threadid = @Layout 32 # 32 threads per operation srclayout = @Layout ((8, 4), 128) ((128, 0), 1) # thr -> bit dstlayout = @Layout (32, 32) (32, 1) return CopyTraits{LDSM_U32x1_N}(threadid, srclayout, dstlayout, dstlayout) end function CopyTraits{LDSM_U32x2_N}() threadid = @Layout 32 srclayout = @Layout ((16, 2), 128) ((128, 0), 1) dstlayout = @Layout (32, (32, 2)) (32, (1, 1024)) return CopyTraits{LDSM_U32x2_N}(threadid, srclayout, dstlayout, dstlayout) end function CopyTraits{LDSM_U32x4_N}() threadid = @Layout 32 srclayout = @Layout (32, 128) (128, 1) dstlayout = @Layout (32, (32, 4)) (32, (1, 1024)) return CopyTraits{LDSM_U32x4_N}(threadid, srclayout, dstlayout, dstlayout) end function CopyTraits{LDSM_U16x2_T}() threadid = @Layout 32 srclayout = @Layout ((8, 4), 128) ((128, 0), 1) dstlayout = @Layout ((4, 8), (16, 2)) ((256, 16), (1, 128)) return CopyTraits{LDSM_U16x2_T}(threadid, srclayout, dstlayout, dstlayout) end function CopyTraits{LDSM_U16x4_T}() threadid = @Layout 32 srclayout = @Layout ((16, 2), 128) ((128, 0), 1) dstlayout = @Layout ((4, 8), (16, 2, 2)) ((256, 16), (1, 128, 1024)) return CopyTraits{LDSM_U16x4_T}(threadid, srclayout, dstlayout, dstlayout) end function CopyTraits{LDSM_U16x8_T}() threadid = @Layout 32 srclayout = @Layout (32, 128) (128, 1) dstlayout = @Layout ((4, 8), (16, 2, 4)) ((256, 16), (1, 128, 1024)) return CopyTraits{LDSM_U16x8_T}(threadid, srclayout, dstlayout, dstlayout) end
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
3187
abstract type AbstractTraits{OP} end abstract type AbstractMMATraits{OP <: AbstractMMAOP} <: AbstractTraits{OP} end # Traits of AbstractMMATraits # logical types, delegate to mma ops @inline valtype_a(::AbstractMMATraits{OP}) where {OP} = valtype_a(OP()) @inline valtype_b(::AbstractMMATraits{OP}) where {OP} = valtype_b(OP()) @inline valtype_c(::AbstractMMATraits{OP}) where {OP} = valtype_c(OP()) @inline valtype_d(::AbstractMMATraits{OP}) where {OP} = valtype_d(OP()) # register types @inline regtype_a(::AbstractMMATraits{OP}) where {OP} = regtype_a(OP()) @inline regtype_b(::AbstractMMATraits{OP}) where {OP} = regtype_b(OP()) @inline regtype_c(::AbstractMMATraits{OP}) where {OP} = regtype_c(OP()) @inline regtype_d(::AbstractMMATraits{OP}) where {OP} = regtype_d(OP()) # register numbers @inline regnum_a(::AbstractMMATraits{OP}) where {OP} = regnum_a(OP()) @inline regnum_b(::AbstractMMATraits{OP}) where {OP} = regnum_b(OP()) @inline regnum_c(::AbstractMMATraits{OP}) where {OP} = regnum_c(OP()) @inline regnum_d(::AbstractMMATraits{OP}) where {OP} = regnum_d(OP()) @inline shape_mnk(::AbstractMMATraits{OP}) where {OP} = shape_mnk(OP()) # the following functions need to be implemented for each mma trait thr_id(::AbstractMMATraits) = error("thr_id not implemented") # Thr-Val layouts for A, B, C layout_a(::AbstractMMATraits) = error("layout_a not implemented") layout_b(::AbstractMMATraits) = error("layout_b not implemented") layout_c(::AbstractMMATraits) = error("layout_c not implemented") struct MMATraits{OP <: AbstractMMAOP, D, A, B, C} <: AbstractMMATraits{OP} end function MMATraits{OP}() where {OP<: AbstractMMAOP} return MMATraits{OP, valtype_d(OP()), valtype_a(OP()), valtype_b(OP()), valtype_c(OP())}() end @inline shape_mnk(::MMATraits{<:UniversalFMA}) = static((1, 1, 1)) @inline thr_id(::MMATraits{<:UniversalFMA}) = @Layout 1 @inline layout_a(::MMATraits{<:UniversalFMA}) = @Layout (1, 1) @inline layout_b(::MMATraits{<:UniversalFMA}) = @Layout (1, 1) @inline layout_c(::MMATraits{<:UniversalFMA}) = @Layout (1, 1) # utilities @inline get_regtypes(traits::AbstractMMATraits) = (regtype_d(traits), regtype_a(traits), regtype_b(traits), regtype_c(traits)) @inline get_regnums(traits::AbstractMMATraits) = (regnum_d(traits), regnum_a(traits), regnum_b(traits), regnum_c(traits)) function mma_unpack!(traits::MMATraits{OP, TD, TA, TB, TC}, D::LocalArray{TD}, A::LocalArray{TA}, B::LocalArray{TB}, C::LocalArray{TC}) where {OP, TD, TA, TB, TC} reg_type_D, reg_type_A, reg_type_B, reg_type_C = get_regtypes(traits) reg_num_D, reg_num_A, reg_num_B, reg_num_C = get_regnums(traits) rD = recast(reg_type_D, D) rA = recast(reg_type_A, A) rB = recast(reg_type_B, B) rC = recast(reg_type_C, C) @assert size(layout(rD)) == reg_num_D @assert size(layout(rA)) == reg_num_A @assert size(layout(rB)) == reg_num_B @assert size(layout(rC)) == reg_num_C return fma!(OP(), rD, rA, rB, rC) end include("mma_sm70.jl") include("mma_sm75.jl") include("mma_sm80.jl")
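# Sketch of the trait accessors above, using the scalar fallback op whose layouts are trivial.
traits = MMATraits{UniversalFMA{Float32, Float32, Float32, Float32}}()
shape_mnk(traits)                # static (1, 1, 1)
layout_c(traits)                 # 1x1 thread-value layout
valtype_a(traits), regnum_c(traits)   # (Float32, 1)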
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
1035
function make_mmatraits_sm70(mmaops) for mmaop in mmaops @eval @inline thr_id(::MMATraits{$mmaop}) = @Layout (4, 2) (1, 16) mmaop_ins = @eval $mmaop() ALayout = alignment_a(mmaop_ins) == :row ? @Layout((8, 4), (1, 8)) : @Layout(((4, 2), 4), ((8, 4), 1)) BLayout = alignment_b(mmaop_ins) == :col ? @Layout((8, 4), (1, 8)) : @Layout(((4, 2), 4), ((8, 4), 1)) CLayout = valtype_c(mmaop_ins) == Float16 ? @Layout((8, 8), (1, 8)) : @Layout(((2,2,2), (2,2,2)), ((1,16,4), (8,2,32))) @eval layout_a(::MMATraits{$mmaop}) = $ALayout @eval layout_b(::MMATraits{$mmaop}) = $BLayout @eval layout_c(::MMATraits{$mmaop}) = $CLayout end end make_mmatraits_sm70(( :MMAOP_8x8x4_F32F16F16F32_TN, :MMAOP_8x8x4_F32F16F16F32_NT, :MMAOP_8x8x4_F32F16F16F32_TT, :MMAOP_8x8x4_F32F16F16F32_NN, :MMAOP_8x8x4_F16F16F16F16_TN, :MMAOP_8x8x4_F16F16F16F16_NT, :MMAOP_8x8x4_F16F16F16F16_TT, :MMAOP_8x8x4_F16F16F16F16_NN, ))
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
728
@inline thr_id(::MMATraits{MMAOP_16x8x8_F32F16F16F32_TN}) = @Layout(32) @inline layout_a(::MMATraits{MMAOP_16x8x8_F32F16F16F32_TN}) = @Layout ((4, 8), (2,2)) ((32, 1), (16, 8)) @inline layout_b(::MMATraits{MMAOP_16x8x8_F32F16F16F32_TN}) = @Layout ((4, 8), 2) ((16, 1), 8) @inline layout_c(::MMATraits{MMAOP_16x8x8_F32F16F16F32_TN}) = @Layout ((4, 8), (2, 2)) ((32, 1), (16, 8)) @inline thr_id(::MMATraits{MMAOP_8x8x16_S32S8S8S32_TN}) = @Layout(32) @inline layout_a(::MMATraits{MMAOP_8x8x16_S32S8S8S32_TN}) = @Layout ((4, 8), 4) ((32, 1), 8) @inline layout_b(::MMATraits{MMAOP_8x8x16_S32S8S8S32_TN}) = @Layout ((4, 8), 2) ((32, 1), 8) @inline layout_c(::MMATraits{MMAOP_8x8x16_S32S8S8S32_TN}) = @Layout ((4, 8), 2) ((16, 1), 8)
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
4372
function _get_layouts(::Tuple{StaticInt{16}, StaticInt{8}, StaticInt{8}}, AElType, CElType) A = @Layout ((4, 8), (2, 2)) ((32, 1), (16, 8)) B = @Layout ((4, 8), 2) ((16, 1), 8) C = A return A, B, C end function _get_layouts(::Tuple{StaticInt{16}, StaticInt{8}, StaticInt{16}}, AElType::Type{<:AbstractFloat}, CElType::Type{<:AbstractFloat}) A = @Layout ((4, 8), (2, 2, 2)) ((32, 1), (16, 8, 128)) B = @Layout ((4, 8), (2, 2)) ((16, 1), (8, 64)) C = @Layout ((4, 8), (2, 2)) ((32, 1), (16, 8)) return A, B, C end function _get_layouts(::Tuple{StaticInt{16}, StaticInt{8}, StaticInt{16}}, AElType, CElType) A = @Layout ((4, 8), (4, 2, 2)) ((64,1), (16, 8, 256)) B = @Layout ((4, 8), (4, 2)) ((32, 1), (8, 128)) C = @Layout ((4, 8), (2, 2)) ((32, 1), (16, 8)) return A, B, C end function _get_layouts(::Tuple{StaticInt{8}, StaticInt{8}, StaticInt{4}}, AElType, CElType) A = @Layout ((4, 8), 1) ((8, 1), 0) B = A C = @Layout ((4, 8), 2) ((16, 1), 8) return A, B, C end function _get_layouts(::Tuple{StaticInt{8}, StaticInt{8}, StaticInt{16}}, AElType, CElType) A = @Layout ((4, 8), 4) ((32, 1), 8) B = A C = @Layout ((4, 8), 2) ((16, 1), 8) return A, B, C end function _get_layouts(::Tuple{StaticInt{16}, StaticInt{8}, StaticInt{32}}, AElType, CElType) A = @Layout ((4, 8), (4, 2, 2)) ((64, 1), (16, 8, 256)) B = @Layout ((4, 8), (4, 2)) ((32, 1), (8, 128)) C = @Layout ((4, 8), (2, 2)) ((32, 1), (16, 8)) return A, B, C end function make_sm80_mmatraits(mmaops) for mmaop in mmaops @eval @inline thr_id(::MMATraits{$mmaop}) = @Layout(32) mmop_ins = @eval $mmaop() A, B, C = _get_layouts(shape_mnk(mmop_ins), valtype_a(mmop_ins), valtype_c(mmop_ins)) @eval layout_a(::MMATraits{$mmaop}) = $A @eval layout_b(::MMATraits{$mmaop}) = $B @eval layout_c(::MMATraits{$mmaop}) = $C end end # 16x8x16 make_sm80_mmatraits(( # 16x8x8 :MMAOP_16x8x8_F16F16F16F16_TN, :MMAOP_16x8x8_F32BF16BF16F32_TN, # 16x8x16 :MMAOP_16x8x16_F16F16F16F16_TN, :MMAOP_16x8x16_F32F16F16F32_TN, :MMAOP_16x8x16_F32BF16BF16F32_TN, :MMAOP_16x8x16_S32S8S8S32_TN, :MMAOP_16x8x16_S32S8S8S32_TN_SATURATE, :MMAOP_16x8x16_S32S8U8S32_TN, :MMAOP_16x8x16_S32S8U8S32_TN_SATURATE, :MMAOP_16x8x16_S32U8S8S32_TN, :MMAOP_16x8x16_S32U8S8S32_TN_SATURATE, :MMAOP_16x8x16_S32U8U8S32_TN, :MMAOP_16x8x16_S32U8U8S32_TN_SATURATE, # 8x8x4 TODO: add complex types :MMAOP_8x8x4_F64F64F64F64_TN, # 8x8x16 #:MMAOP_8x8x16_S32S8S8S32_TN, :MMAOP_8x8x16_S32S8S8S32_TN_SATURATE, :MMAOP_8x8x16_S32S8U8S32_TN, :MMAOP_8x8x16_S32S8U8S32_TN_SATURATE, :MMAOP_8x8x16_S32U8S8S32_TN, :MMAOP_8x8x16_S32U8S8S32_TN_SATURATE, :MMAOP_8x8x16_S32U8U8S32_TN, :MMAOP_8x8x16_S32U8U8S32_TN_SATURATE, # 16x8x32 :MMAOP_16x8x32_S32S8S8S32_TN, :MMAOP_16x8x32_S32S8S8S32_TN_SATURATE, :MMAOP_16x8x32_S32S8U8S32_TN, :MMAOP_16x8x32_S32S8U8S32_TN_SATURATE, :MMAOP_16x8x32_S32U8S8S32_TN, :MMAOP_16x8x32_S32U8S8S32_TN_SATURATE, :MMAOP_16x8x32_S32U8U8S32_TN, :MMAOP_16x8x32_S32U8U8S32_TN_SATURATE, )) # special cases @inline thr_id(::MMATraits{MMAOP_16x8x8_F32TF32TF32F32_TN}) = @Layout(32) @inline layout_a(::MMATraits{MMAOP_16x8x8_F32TF32TF32F32_TN}) = @Layout ((4, 8), (2, 2)) ((16, 1), (8, 64)) @inline layout_b(::MMATraits{MMAOP_16x8x8_F32TF32TF32F32_TN}) = @Layout ((4, 8), 2) ((8, 1), 32) @inline layout_c(::MMATraits{MMAOP_16x8x8_F32TF32TF32F32_TN}) = @Layout ((4, 8), (2, 2)) ((32, 1), (16, 8)) @inline thr_id(::MMATraits{MMAOP_16x8x4_F32TF32TF32F32_TN}) = @Layout(32) @inline layout_a(::MMATraits{MMAOP_16x8x4_F32TF32TF32F32_TN}) = @Layout ((4, 8), 1) ((8, 1),08) @inline layout_b(::MMATraits{MMAOP_16x8x4_F32TF32TF32F32_TN}) = @Layout ((4, 8), (2, 
2)) ((32, 1), (16, 8)) @inline thr_id(::MMATraits{MMAOP_16x8x256_S32B1B1S32_TN_XORPOPC}) = @Layout(32) @inline layout_a(::MMATraits{MMAOP_16x8x256_S32B1B1S32_TN_XORPOPC}) = @Layout (32, (8, 4, 2, 2)) (64, (64, 16 ,8, 2048)) @inline layout_b(::MMATraits{MMAOP_16x8x256_S32B1B1S32_TN_XORPOPC}) = @Layout (32, (32, 2)) (64, (1, 1024))
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
4049
using MoYe, Test function test_alloc() slayout = @Layout (2, 3) x = MoYeArray{Float32}(undef, slayout) fill!(x, 1.0f0) return sum(x) end @test @allocated(test_alloc()) == 0 @testset "Constructors" begin @test_nowarn MoYeArray{Float32}(undef, static((2, 3))) @test_throws MethodError MoYeArray{Float32}(undef, (2, 3)) @test_nowarn MoYeArray{Float32}(undef, static((2, 3)), GenRowMajor) @test_nowarn MoYeArray{Float32}(undef, static((2, 3)), GenColMajor) A = rand(3) ca = MoYeArray(pointer(A), static((3, 1))) ca2 = MoYeArray(pointer(A), static((3, 1)), GenRowMajor) @test ca.engine isa ViewEngine @test ca2.engine isa ViewEngine end @testset "Static indexing" begin A = MoYeArray{Float64}(undef, @Layout((3,4))) @test A[_1] == A[1] A[_1] = 1.0 @test A[_1] == 1.0 data = rand(3,4) B = MoYeArray(pointer(data), @Layout((3,4))) @test B[_1] == B[1] B[_1] = 1.0 @test B[_1] == 1.0 end @testset "Array Operations" begin @testset "View" begin ca = MoYeArray{Float32}(undef, static((2, 3))) va = view(ca, :, 1) @test va isa MoYeArray @test va.engine isa ViewEngine @test va.layout.shape == tuple(_2) va2 = view(ca, :, :) @test va2 isa MoYeArray @test va2.engine isa ViewEngine @test va2.layout.shape == tuple(_2, _3) end @testset "Copy" begin ca = MoYeArray{Float32}(undef, static((2, 3))) zeros!(ca) ca2 = copy(ca) @test ca2 isa MoYeArray @test ca2.engine isa ArrayEngine @test ca2 == ca A = ones(6) ca3 = MoYeArray(pointer(A), _6) ca4 = copy(ca3) @test ca4 isa MoYeArray @test ca4.engine isa ArrayEngine @test ca4 == ca3 end @testset "similar" begin ca = MoYeArray{Float32}(undef, static((2, 3))) ca2 = similar(ca) @test ca2 isa MoYeArray @test ca2.engine isa ArrayEngine @test ca2.layout == ca.layout A = ones(6) ca3 = MoYeArray(pointer(A), _6) ca4 = similar(ca3) @test ca4 isa MoYeArray @test ca4.engine isa ArrayEngine @test ca4.layout == ca3.layout end @testset "recast" begin data = collect(1:12) a = MoYeArray(pointer(data), @Layout((4,3))) @testset "downcast" begin b = recast(Int32, a) @test b.layout == @Layout((8,3)) @test b.engine isa ViewEngine @testset "dynamic layout" begin a′ = MoYeArray(pointer(data), make_layout((4,3))) b′ = recast(Int32, a′) @test b′.layout == make_layout((8,3)) @test b′.engine isa ViewEngine end end @testset "upcast" begin c = recast(UInt128, a) @test c.layout == @Layout((2,3)) @test c.engine isa ViewEngine end end end @testset "BLAS" begin @testset "fill! and sum" begin ca = MoYeArray{Float32}(undef, static((2, 3))) fill!(ca, 1.0f0) @test all(ca .== 1.0f0) @test sum(ca) == 6.0f0 end end @testset "Unsqueeze" begin a = MoYeArray{Float32}(undef, static((2, 3))) b = MoYe.append_dim(a, _3); @test b.layout == @Layout (2, 3, 1) (1,2,0) c = MoYe.prepend_dim(a, _3); @test c.layout == @Layout (1, 2, 3) (0,1,2) end @testset "Recast" begin x = MoYeArray{Float32}(undef, static((4, 3))) zeros!(x) @testset "Upcast" begin x2 = recast(Float64, x) @test x2 isa MoYeArray{Float64} @test x2.layout == @Layout (2, 3) @test x == recast(Float32, x2) end @testset "Downcast" begin x2 = recast(Float16, x) zeros!(x2) @test x2 isa MoYeArray{Float16} @test x2.layout == @Layout (8, 3) @test x == recast(Float32, x2) end end
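The `recast` tests encode one simple rule: changing the element size rescales the leading (first) mode of the layout and leaves the other modes untouched. Restating the two cases above in isolation:

```julia
using MoYe

x = MoYeArray{Float32}(undef, @Layout((4, 3)))
zeros!(x)

recast(Float16, x).layout  # @Layout((8, 3)) — half-size elements double the leading mode
recast(Float64, x).layout  # @Layout((2, 3)) — double-size elements halve it
```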
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
393
using MoYe, Test x = MoYeArray{Float32}(undef, @Layout((3,2))) x2 = x .+ x x3 = x .+ 1 @test x2 isa MoYeArray{Float32} @test x2.layout == @Layout((3,2)) @test x2.engine isa ArrayEngine @test x3 isa MoYeArray{Float32} @test x3.layout == @Layout((3,2)) @test x3.engine isa ArrayEngine y = MoYeArray{Float32}(undef, @Layout((3,), (2,))) z = x .+ y @test z.layout == x.layout
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
6140
using MoYe, Test, CUDA using Core: LLVMPtr @testset "Universal Copy" begin @testset "Explicit Vectorized Copy" begin @testset "Sequential Copy" begin # single thread a = [Int32(i) for i in 1:8] pa = reinterpret(LLVMPtr{Int32, AS.Generic}, pointer(a)) x = MoYeArray(pa, make_layout((4,2))) b = [Int32(i) for i in 9:16] pb = reinterpret(LLVMPtr{Int32, AS.Generic}, pointer(b)) y = MoYeArray(pb, make_layout((4,2))) GC.@preserve a b begin MoYe._copyto_vec!(y, x, Int128) @test y == x end end @testset "Parallized Copy" begin function parallelized_copy(a, b, thread_layout) for i in 1:size(thread_layout) # on gpu this is parallelized instead of sequential thread_tile_a = @parallelize a thread_layout i thread_tile_b = @parallelize b thread_layout i #display(thread_tile_a) MoYe._copyto_vec!(thread_tile_b, thread_tile_a, Int32) # no vectorization here end end a_data = [Int32(i) for i in 1:6*6] b_data = [Int32(i) for i in 6*6+1:6*6*2] GC.@preserve a_data b_data begin thread_layout = @Layout((2,3)) layout = make_layout((6,6)) pa = pointer(a_data) pa = reinterpret(LLVMPtr{Int32, AS.Generic}, pa) a = MoYeArray(pa, layout) pb = pointer(b_data) pb = reinterpret(LLVMPtr{Int32, AS.Generic}, pb) b = MoYeArray(pb, layout) parallelized_copy(a, b, thread_layout) end @test b == a end end @testset "Auto-vectorized Copy" begin @testset "Sequential Copy" begin a = [Int32(i) for i in 1:8] pa = reinterpret(LLVMPtr{Int32, AS.Generic}, pointer(a)) x = MoYeArray(pa, @Layout((4,2))) b = [Int32(i) for i in 9:16] pb = reinterpret(LLVMPtr{Int32, AS.Generic}, pointer(b)) y = MoYeArray(pb, @Layout((4,2))) GC.@preserve b a begin copyto!(y, x) # should recast to UInt128 @test y == x end end @testset "Parallized Copy" begin function parallelized_copy(a, b, thread_layout) for i in 1:size(thread_layout) # on gpu this is parallelized instead of sequential thread_tile_a = @parallelize a thread_layout i thread_tile_b = @parallelize b thread_layout i #display(thread_tile_a) copyto!(thread_tile_b, thread_tile_a) end end a_data = [Int32(i) for i in 1:6*6] b_data = [Int32(i) for i in 6*6+1:6*6*2] GC.@preserve a_data b_data begin thread_layout = @Layout((2,3)) layout = @Layout((6,6)) pa = pointer(a_data) pa = reinterpret(LLVMPtr{Int32, AS.Generic}, pa) a = MoYeArray(pa, layout) pb = pointer(b_data) pb = reinterpret(LLVMPtr{Int32, AS.Generic}, pb) b = MoYeArray(pb, layout) parallelized_copy(a, b, thread_layout) end @test b == a end end end function tiled_copy_kernel(g_in, g_out, tiled_copy, smem_layout) t_g_in = MoYeArray(pointer(g_in), smem_layout) t_g_out = MoYeArray(pointer(g_out), smem_layout) t_smem=MoYeArray{UInt16}(undef, smem_layout) for tid in 1:32 for i in tid:size(tiled_copy):size(t_smem.layout) t_smem[i] = t_g_in[i] end end for tid in 1:size(tiled_copy) thr_copy = get_slice(tiled_copy, tid) tXsX = partition_S(thr_copy, t_smem) tXgX = partition_D(thr_copy, t_g_out) tXrX = MoYeArray{UInt16}(undef, tXgX.layout.shape) copyto!(tiled_copy, tXrX, tXsX) copyto!(tXgX, tXrX) end end @testset "Tiled Copy" begin @testset "UniversalCopy" begin @testset "32 x 32" begin g_in = [UInt16(i) for i in 1:32*32] g_out = zeros(UInt16, 32*32) smem_layout = @Layout (32,32) (1,32) tiled_copy = make_tiled_copy(MoYe.CopyAtom{MoYe.UniversalCopy{UInt16, UInt16}, UInt16}(), @Layout((16,2)), @Layout((2,4))) tiled_copy_kernel(g_in, g_out, tiled_copy, smem_layout) @test g_out == g_in end @testset "32 x 8" begin g_in = [UInt16(i) for i in 1:32*8] g_out = zeros(UInt16, 32*8) smem_layout = @Layout (32, (2, 4)) (2, (1, 64)) tiled_copy = 
make_tiled_copy(MoYe.CopyAtom{MoYe.UniversalCopy{UInt16, UInt16}, UInt16}(), @Layout((32,1)), @Layout((1,8))) tiled_copy_kernel(g_in, g_out, tiled_copy, smem_layout) @test g_out == g_in end end @testset "LDMATRIX" begin @testset "32 x 32" begin g_in = [UInt16(i) for i in 1:32*32] g_out = zeros(UInt16, 32*32) smem_layout = @Layout (32,32) (1,32) for ldmatrix in [:LDSM_U32x1_N, :LDSM_U32x2_N, :LDSM_U32x4_N] @testset "$ldmatrix" begin @eval tiled_copy = make_tiled_copy(MoYe.CopyAtom{$ldmatrix, UInt16}(), @Layout((16,2)), @Layout((2,4))) end end tiled_copy = make_tiled_copy(MoYe.CopyAtom{MoYe.UniversalCopy{UInt16, UInt16}, UInt16}(), @Layout((16,2)), @Layout((2,4))) tiled_copy_kernel(g_in, g_out, tiled_copy, smem_layout) @test g_out == g_in end end end
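The auto-vectorized path works because four contiguous `Int32` values occupy exactly one 128-bit word, so `copyto!` can recast both sides to `UInt128` before copying (the "should recast to UInt128" comment in the sequential test). A minimal host-side illustration of that recast, with the expected layout noted as an assumption:

```julia
using MoYe

data = Int32.(1:8)
GC.@preserve data begin
    x = MoYeArray(pointer(data), @Layout((4, 2)))
    recast(UInt128, x).layout   # expected @Layout((1, 2)): each Int32 column is one 128-bit word
end
```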
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
159
using MoYe, Test function test_alloc() x = ArrayEngine{Float32}(one, static(10)) @gc_preserve sum(x) end @test @allocated(test_alloc()) == 0
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
3946
using MoYe, Test, JET @testset "Product" begin @inferred MoYe.product_like(((_2, _2), _2), ((_32, _512), _2048)) end @testset "Capacity" begin @test capacity(((2, 3, (1, 1)), 4)) == 24 @test_opt capacity(((2, 3, (1, 1)), 4)) end @testset "Repeat like" begin @test MoYe.repeat_like((2,3,4), 1) == (1,1,1) @test MoYe.repeat_like(typeof((2,3,4)), 1) == (1,1,1) @test_opt MoYe.repeat_like((2,3,4), 1) @test_opt MoYe.repeat_like(typeof((2,3,4)), 1) @test MoYe.repeat_like(((2,3), (4,5)), 1) == ((1,1), (1,1)) @test MoYe.repeat_like(typeof(((2,3), (4,5))), 1) == ((1,1), (1,1)) @test_opt MoYe.repeat_like(((2,3), (4,5)), 1) @test_opt MoYe.repeat_like(typeof(((2,3), (4,5))), 1) end @testset "Shape Division" begin @test MoYe.shape_div((12, 3), (2, 3)) == (6, 1) @test_opt MoYe.shape_div((12, 3), (2, 3)) @test MoYe.shape_div((12, 3), 3) == (4, 3) @test_opt MoYe.shape_div((12, 3), 3) @test MoYe.shape_div(12, (3, 4)) == 1 @test_opt MoYe.shape_div(12, (3, 4)) end @testset "Slice" begin @test MoYe.slice((3, 4), (2, :)) == (4,) @test_opt MoYe.slice((3, 4), (2, :)) @test MoYe.slice((3, (4, 5)), (:, (2, :))) == (3, 5) @test_opt MoYe.slice((3, (4, 5)), (:, (2, :))) @test MoYe.slice(((2, 4), (4, 2)), (:, (:, :))) == ((2, 4), 4, 2) @test_opt MoYe.slice(((2, 4), (4, 2)), (:, (:, :))) end @testset "Elementwise Comparison" begin @test elem_less((1, 2, 3), (1, 2, 4)) == true @test elem_less((1, 2, 3), (1, 2, 3)) == false @test elem_less((1, 2, 3), (1, 2, 3, 1)) == true @test elem_less((1, 2, 3, 1), (1, 2, 3)) == false @test elem_less(((1, 2, 3),), ((1, 2, 3),)) == false @test elem_less(((1, 2, 3),), ((1, 2, 4),)) == true @test elem_less(((1, 2, 3),), ((1, 2, 3), 1)) == true @test elem_less(((1, 2, 3), 1), ((1, 2, 3), 2)) == true @test elem_less(((1, 2, 3), 2), ((1, 2, 4), 1)) == false @test elem_less(((1, 2), (3, 4)), ((5, 6), (7, 8))) == true @test elem_less(((1, 2), (3, 4)), ((1, 2), (7, 8))) == true @test elem_less(((1, 2), (3, 4)), ((1, 2), (1, 8))) == false @test elem_less(((1, 2), (3, 4)), ((3, 4), (3, 4))) == false @test elem_less(((1, 2), (3, 4)), ((3, 4), (3, 4, 1))) == true @test elem_less(((1, 2), (3, 4)), ((3, 4), (3, 4), 1)) == true @test_opt elem_less(((1, 2), (3, 4)), ((3, 4), (3, 4), 1)) end @testset "Colexicographical Comparison" begin @test colex_less((1, 2, 3), (1, 2, 4)) == true @test colex_less((1, 2, 3), (1, 2, 3)) == false @test colex_less((1, 2, 3), (1, 1, 2, 3)) == true @test colex_less((1, 1, 2, 3), (1, 2, 3)) == false @test colex_less(((1, 2, 3),), ((1, 2, 3),)) == false @test colex_less(((0, 2, 3),), ((1, 2, 3),)) == true @test colex_less(((1, 2, 3),), (1, (1, 2, 3))) == true @test colex_less((1, (1, 2, 3)), (2, (1, 2, 3))) == true @test colex_less((2, (1, 2, 3)), (1, (2, 2, 3))) == true @test colex_less(((1, 2), (3, 4)), ((5, 6), (7, 8))) == true @test colex_less(((1, 2), (3, 4)), ((2, 3), (3, 4))) == true @test colex_less(((1, 2), (3, 4)), ((1, 3), (3, 4))) == true @test colex_less(((5, 4), (3, 4)), ((3, 4), (3, 4))) == false @test colex_less(((1, 2), (3, 4)), ((1, 1, 2), (3, 4))) == true @test_opt colex_less(((1, 2), (3, 4)), ((1, 1, 2), (3, 4))) end @testset "Increment" begin @test increment((1, 1), (3, 4)) == (2, 1) @test increment((3, 1), (3, 4)) == (1, 2) @test increment((3, 4), (3, 4)) == (1, 1) @test increment((2, (2, 1), 1), (2, (2, 3), 3)) == (1, (1, 2), 1) @test increment((2, (2, 2), 1), (2, (2, 3), 3)) == (1, (1, 3), 1) @test increment((2, (2, 3), 1), (2, (2, 3), 3)) == (1, (1, 1), 2) @test increment((2, (2, 3), 3), (2, (2, 3), 3)) == (1, (1, 1), 1) @test_opt increment((2, (2, 3), 
3), (2, (2, 3), 3)) end
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
19783
using MoYe, Test, JET using Static: One @testset "Int32" begin a = @Layout (2, (2, 2)) (2, (1, 4)) idx_a = a(Int32(2)) @test typeof(idx_a) == Int32 b = make_layout((Int32(2), Int32(4))) idx_b = b(Int32(2)) @test typeof(idx_b) == Int32 end @testset "Macro Layout" begin @test @Layout((2, (2, 2)), (4, (1, 2))) == make_layout(static((2, (2, 2))), static((4, (1, 2)))) @test @Layout(2, 4) == make_layout(_2, _4) @test @Layout((2, (2, 2))) == make_layout(static((2, (2, 2)))) @test @Layout(2) == make_layout(_2) @test @Layout((2, (2, 2)), GenColMajor) == make_layout(static((2, (2, 2))), GenColMajor) @test @Layout(2, GenColMajor) == make_layout(_2, GenColMajor) @test @Layout((2, (2, 2)), GenRowMajor) == make_layout(static((2, (2, 2))), GenRowMajor) @test @Layout(2, GenRowMajor) == make_layout(_2, GenRowMajor) end @testset "Flatten" begin @test flatten(make_layout(((4, 3), 1), ((3, 1), 0))) == make_layout((4, 3, 1), (3, 1, 0)) @test_opt flatten(make_layout(((4, 3), 1), ((3, 1), 0))) end @testset "Coalesce" begin @test coalesce(@Layout((2, (1, 6)), (1, (6, 2)))) == @Layout(12, 1) @test_opt coalesce(@Layout((2, (1, 6)), (1, (6, 2)))) @test_opt MoYe.bw_coalesce(MoYe.One(), (1,), (48,), 2, 1) function test_coalesce(layout) @test_opt coalesce(layout) coalesce_layout = coalesce(layout) @test depth(coalesce_layout) <= One() @test size(coalesce_layout) == size(layout) for i in One():size(layout) @test coalesce_layout(i) == layout(i) end end let layout = make_layout(_1, Int(0)) test_coalesce(layout) end let layout = make_layout(_1, _1) test_coalesce(layout) end let layout = make_layout(tuple(_2, _4)) test_coalesce(layout) end let layout = make_layout(tuple(_2, _4, _6)) test_coalesce(layout) end let layout = make_layout(tuple(static(2), static(1), static(6)), tuple(static(1), static(6), static(2))) test_coalesce(layout) end let layout = make_layout(tuple(static(2), static(1), static(6)), tuple(static(1), 7, static(2))) test_coalesce(layout) end let layout = make_layout(tuple(static(2), static(1), static(6)), tuple(static(4), 7, static(8))) test_coalesce(layout) end let layout = make_layout(tuple(2, static(4), static(6))) test_coalesce(layout) end let layout = make_layout(tuple(static(2), 4, static(6))) test_coalesce(layout) end let layout = make_layout(tuple(static(2), static(4), 6)) test_coalesce(layout) end let layout = make_layout(tuple(static(2), static(4)), GenRowMajor) test_coalesce(layout) end let layout = make_layout(tuple(static(2), static(4), static(6)), GenRowMajor) test_coalesce(layout) end let layout = make_layout(tuple(2, static(4), static(6)), GenRowMajor) test_coalesce(layout) end let layout = make_layout(tuple(static(2), 4, static(6)), GenRowMajor) test_coalesce(layout) end let layout = make_layout(tuple(static(2), static(4), 6), GenRowMajor) test_coalesce(layout) end let layout = make_layout(tuple(static(2), static(1), static(3)), GenRowMajor) test_coalesce(layout) end let layout = make_layout(tuple(static(2), 1, static(3)), GenRowMajor) test_coalesce(layout) end let layout = make_layout(tuple(static(2), 1, static(3)), tuple(static(2), 4, static(4))) test_coalesce(layout) end let layout = make_layout(tuple(static(2), 1, static(3)), tuple(static(2), Int(0), static(4))) test_coalesce(layout) end let layout = Layout(tuple(tuple(static(2), static(2)), tuple(static(2), static(2))), tuple(tuple(static(1), static(4)), tuple(static(8), static(32)))) test_coalesce(layout) end end @testset "Composition" begin @test make_layout(20, 2) ∘ make_layout((4, 5), (1, 4)) == make_layout((4, 5), (2, 8)) @test 
make_layout(20, 2) ∘ make_layout((4, 5), (5, 1)) == make_layout((4, 5), (10, 2)) @test_opt make_layout(20, 2) ∘ make_layout((4, 5), (1, 4)) @test_opt make_layout(20, 2) ∘ make_layout((4, 5), (5, 1)) function test_composition(A,B) @test_opt A ∘ B C = A ∘ B @test MoYe.iscompatible(B,C) for i in static(1):size(C) @test C(i) == A(B(i)) end end let a = @Layout(1,0), b = @Layout(1,0) test_composition(a, b) end let a = @Layout(1,0), b = @Layout(1,1) test_composition(a, b) end let a = @Layout(1,1), b = @Layout(1,0) test_composition(a, b) end let a = @Layout(1,1), b = @Layout(1,1) test_composition(a, b) end let a = @Layout((4,)), b = @Layout((4,)) test_composition(a, b) end let a = @Layout((4,), (2,)), b = @Layout((4,)) test_composition(a, b) end let a = @Layout((4,), (0,)), b = @Layout((4,)) test_composition(a, b) end let a = @Layout((4,)), b = @Layout((4,), (0,)) test_composition(a, b) end let a = @Layout((4,)), b = @Layout((1,), (0,)) test_composition(a, b) end let a = @Layout((4,)), b = @Layout((2,)) test_composition(a, b) end let a = @Layout((4,), (2,)), b = @Layout((2,)) test_composition(a, b) end let a = @Layout((4,)), b = @Layout((2,), (2,)) test_composition(a, b) end let a = @Layout((4,), (2,)), b = @Layout((2,), (2,)) test_composition(a, b) end let a = @Layout((4,3)), b = @Layout((12,)) test_composition(a, b) end let a = @Layout((12,)), b = @Layout((4,3)) test_composition(a, b) end let a = @Layout((12,), (2,)), b = @Layout((4,3)) test_composition(a, b) end let a = @Layout((12,)), b = @Layout((4,3), (3,1)) test_composition(a, b) end let a = @Layout((12,), (2,)), b = @Layout((4,3), (3,1)) test_composition(a, b) end let a = @Layout((12,)), b = @Layout((2,3), (2,4)) test_composition(a, b) end let a = @Layout((4,3)), b = @Layout((4,3)) test_composition(a, b) end let a = @Layout((4,3)), b = @Layout((6,), (2,)) test_composition(a, b) end let a = @Layout((4,3)), b = @Layout((6,2), (2,1)) test_composition(a, b) end let a = @Layout((4,3), (3,1)), b = @Layout((4,3)) test_composition(a, b) end let a = @Layout((4,3), (3,1)), b = @Layout((12,)) test_composition(a, b) end let a = @Layout((4,3), (3,1)), b = @Layout((6,), (2,)) test_composition(a, b) end let a = @Layout((4,3), (3,1)), b = @Layout((6,2), (2,1)) test_composition(a, b) end let a = @Layout((8,8)), b = @Layout(((2,2,2),(2,2,2)), ((1,16,4),(8,2,32))) test_composition(a, b) end let a = @Layout((8,8), (8,1)), b = @Layout(((2,2,2),(2,2,2)), ((1,16,4),(8,2,32))) test_composition(a, b) end let a = @Layout(((4,2),), ((1,16),)), b = @Layout((4,2), (2,1)) test_composition(a, b) end let a = @Layout((2,2), (2,1)), b = @Layout((2,2), (2,1)) test_composition(a, b) end let a = @Layout((4,8,2)), b = @Layout((2,2,2), (2,8,1)) test_composition(a, b) end let a = @Layout((4,8,2), (2,8,1)), b = @Layout((2,2,2), (1,8,2)) test_composition(a, b) end let a = @Layout((4,8,2), (2,8,1)), b = @Layout((4,2,2), (2,8,1)) test_composition(a, b) end @testset "Dynamic" begin let a = make_layout(12, 1), b = make_layout(static(4), static(1)) test_composition(a, b) end let a = make_layout(12, 1), b = make_layout(static(4), 1) test_composition(a, b) end let a = make_layout(12, static(1)), b = make_layout(static(4), 1) test_composition(a, b) end let a = make_layout(12, static(1)), b = make_layout(static(4), static(1)) test_composition(a, b) end let a = make_layout(tuple(12, 3), tuple(1, 24)), b = make_layout(tuple(static(4)), tuple(static(1))) test_composition(a, b) end let a = make_layout(16, 2), b = make_layout(4, 2) test_composition(a, b) end let a = make_layout(tuple(128, 24, 
5), tuple(1, 128, 3072)), b = make_layout(64, 2) test_composition(a, b) end let a = make_layout(tuple(128, 24, 5), tuple(1, 128, 3072)), b = make_layout(480, static(32)) test_composition(a, b) end end end @testset "Complement" begin @test complement(@Layout(4, 1), static(24)) == @Layout(6, 4) @test complement(@Layout(6, 4), static(24)) == @Layout(4, 1) @test_opt complement(@Layout(4, 1), static(24)) @test_opt complement(@Layout(6, 4), static(24)) function test_complement(l, cotarget) @test_opt complement(l, cotarget) result = complement(l, cotarget) @test size(result) ≥ cotarget ÷ size(filter(l)) @test cosize(result) ≤ cld(cotarget, cosize(l)) * cosize(l) completed = make_layout(l, result) @test cosize(completed) ≥ cotarget for i in 2:size(result) @test result(i-1) < result(i) for j in 1:size(l) @test result(i) != l(j) end end @test size(result) ≤ cosize(result) @test cosize(result) ≥ cotarget ÷ size(filter(l)) @test cosize(completed) ≤ cosize(result) + cosize(l) @test cosize(result) ≥ cotarget ÷ size(filter(l)) if MoYe.dynamic(MoYe.is_static(stride(make_layout(l, result)))) @test size(complement(make_layout(l, result))) == 1 end end test_complement(l::Layout) = test_complement(l, cosize(l)) let layout = @Layout(1,0) test_complement(layout) test_complement(layout, static(2)) end let layout = @Layout(1,1) test_complement(layout) test_complement(layout, static(2)) end let layout = @Layout(1,2) test_complement(layout, One()) test_complement(layout, static(2)) test_complement(layout, static(8)) end let layout = @Layout(4,0) test_complement(layout, One()) test_complement(layout, static(2)) test_complement(layout, static(8)) end let layout = @Layout(4,1) test_complement(layout, One()) test_complement(layout, static(2)) test_complement(layout, static(8)) end let layout = @Layout(4,2) test_complement(layout, One()) test_complement(layout) test_complement(layout, static(16)) end let layout = @Layout(4,4) test_complement(layout, One()) test_complement(layout) test_complement(layout, static(17)) end let layout = @Layout(2,4) test_complement(layout) end let layout = @Layout(2,3) test_complement(layout) end let layout = @Layout((2,4), (1,4)) test_complement(layout) end let layout = @Layout((2,4,8), (8,1,64)) test_complement(layout) end let layout = @Layout((2,4,8), (8,1,0)) test_complement(layout) test_complement(layout, static(460)) end let layout = @Layout(((2,2), (2,2)), ((1,4), (8,32))) test_complement(layout) end end @testset "Product" begin tile = @Layout((2, 2), (1, 2)) matrix_of_tiles = @Layout((3, 4), (4, 1)) @testset "Logical product" begin result = logical_product(tile, matrix_of_tiles) @test shape(result) == ((2, 2), (3, 4)) @test stride(result) == ((1, 2), (16, 4)) @test_opt logical_product(tile, matrix_of_tiles) @test logical_product(@Layout(1), @Layout((2,2))) == @Layout((1,(2,2))) function test_logical_product(A,B) @test_opt logical_product(A,B) C = logical_product(A,B) @test rank(C) == 2 @test MoYe.iscompatible(A, first(C)) end let vec = @Layout(1,0), tile = @Layout(1,0) test_logical_product(vec, tile) end let vec = @Layout(1,1), tile = @Layout(1,0) test_logical_product(tile, vec) end let vec = @Layout(1,1), tile = @Layout(1,1) test_logical_product(vec, tile) end let vec = @Layout(3,1), tile = @Layout(4,0) test_logical_product(tile, vec) end let vec = @Layout(3,0), tile = @Layout(4,0) test_logical_product(vec, tile) end let vec = @Layout(3,2), tile = @Layout(4,1) test_logical_product(vec, tile) end let vec = @Layout((3,)), tile = @Layout((2,4)) test_logical_product(vec, tile) end let 
vec = @Layout((8,(2,2))), tile = @Layout(4,2) test_logical_product(vec, tile) end let vec = @Layout((2,2)), tile = @Layout((3,3), (3,1)) test_logical_product(vec, tile) end let vec = @Layout(3,32), tile = @Layout((8,8)) test_logical_product(vec, tile) end let vec = @Layout(((2,2),(2,2)), ((1,4),(8,32))), tile = @Layout((2,2), (1,2)) test_logical_product(vec, tile) end let vec = @Layout(((2,2),(2,2)), ((1,4),(8,32))), tile = @Layout((2,2), (2,1)) test_logical_product(vec, tile) end end @testset "Blocked product" begin result = blocked_product(tile, matrix_of_tiles, true) @test shape(result) == ((2, 3), 8) @test stride(result) == ((1, 16), 2) @test_opt blocked_product(static(tile), static(matrix_of_tiles), true) end @testset "Raked product" begin result = raked_product(tile, matrix_of_tiles, true) @test shape(result) == ((3, 2), (4, 2)) @test stride(result) == ((16, 1), (4, 2)) @test_opt raked_product(static(tile), static(matrix_of_tiles), true) end @testset "Zipped product" begin result = zipped_product(tile, matrix_of_tiles) @test shape(result) == ((2, 2), (3, 4)) @test stride(result) == ((1, 2), (16, 4)) @test_opt zipped_product(static(tile), static(matrix_of_tiles)) end @testset "Tiled product" begin result = tiled_product(tile, matrix_of_tiles) @test shape(result) == ((2, 2), 3, 4) @test stride(result) == ((1, 2), 16, 4) @test_opt tiled_product(static(tile), static(matrix_of_tiles)) end end @testset "Division" begin tile = @Layout((2, 2), (1, 2)) matrix_of_tiles = @Layout((3, 4), (4, 1)) raked_prod = raked_product(tile, matrix_of_tiles) subtile = (@Layout(2, 3), @Layout(2, 4)) @testset "Logical division" begin @test logical_divide(@Layout(16, 3), @Layout(4, 1)) == @Layout((4, 4), (3, 12)) @test logical_divide(@Layout(16, 3), @Layout(4, 4)) == @Layout((4, 4), (12, 3)) @test logical_divide(@Layout(16, 3), @Layout(4, 2)) == @Layout((4, (2, 2)), (6, (3, 24))) @test logical_divide(@Layout(16, 3), @Layout((2, 2), (4, 1))) == @Layout(tuple((2, 2), (2, 2)), tuple((12, 3), (6, 24))) @test logical_divide(raked_prod, subtile) == @Layout(((2, 3), (2, 4)), ((1, 16), (2, 4))) @test_opt logical_divide(raked_prod, subtile) @test_call logical_divide(raked_prod, subtile) function test_logical_divide(A, B) @test_opt logical_divide(A,B) C = logical_divide(A,B) @test rank(C) == 2 @test MoYe.iscompatible(B, first(C)) end let layout = @Layout(1, 0), tile = @Layout(1, 0) test_logical_divide(layout, tile) end let layout = @Layout(1, 0), tile = @Layout(1, 1) test_logical_divide(layout, tile) end let layout = @Layout(1, 1), tile = @Layout(1, 0) test_logical_divide(layout, tile) end let layout = @Layout(1, 1), tile = @Layout(1, 1) test_logical_divide(layout, tile) end let layout = @Layout(6, 1), tile = @Layout(2, 1) test_logical_divide(layout, tile) end let layout = @Layout(6, 1), tile = @Layout(2, 3) test_logical_divide(layout, tile) end let layout = @Layout((6, 6), (1, 12)), tile = @Layout((6, 3), (3, 1)) test_logical_divide(layout, tile) end let layout = @Layout((6, 6), (12, 1)), tile = @Layout((6, 3), (3, 1)) test_logical_divide(layout, tile) end let layout = @Layout(32), tile = @Layout((2, 8)) test_logical_divide(layout, tile) end let layout = @Layout((4, 1), (1, 1)), tile = @Layout(2, 1) test_logical_divide(layout, tile) end let layout = @Layout((4, 1), (1, 1)), tile = @Layout(2, 2) test_logical_divide(layout, tile) end let layout = @Layout((8, 8), (1, 8)), tile = @Layout((32, 2)) test_logical_divide(layout, tile) end let layout = @Layout((8, 8), (8, 1)), tile = @Layout((32, 2)) test_logical_divide(layout, 
tile) end end @testset "Zipped division" begin @test zipped_divide(raked_prod, subtile) == @Layout(((2, 2), (3, 4)), ((1, 2), (16, 4))) @test_opt zipped_divide(static(raked_prod), static(subtile)) end @testset "Tiled division" begin @test tiled_divide(raked_prod, subtile) == @Layout(((2, 2), 3, 4), ((1, 2), 16, 4)) @test_opt zipped_divide(static(raked_prod), static(subtile)) end end @testset "Inverse" begin @testset "Right Inverse" begin function test_right_inverse(l) inv_l = right_inverse(l) @test_opt right_inverse(l) @test_call right_inverse(l) for i in 1:size(inv_l) @test l(inv_l(i)) == i end end test_right_inverse(@Layout(1, 0)) test_right_inverse(@Layout(1, 1)) test_right_inverse(@Layout((4,), (0,))) test_right_inverse(@Layout((4,), (1,))) test_right_inverse(@Layout((4,), (2,))) test_right_inverse(@Layout((1,1), (0,0))) test_right_inverse(@Layout((3,7), (0,0))) test_right_inverse(@Layout((1,), (1,))) test_right_inverse(@Layout((2,4), (0,2))) test_right_inverse(@Layout((8,4))) test_right_inverse(@Layout((8,4), (4,1))) test_right_inverse(@Layout((2,4,6))) test_right_inverse(@Layout((2,4,6), (4,1,8))) test_right_inverse(@Layout((2,4,4,6), (4,1,0,8))) test_right_inverse(@Layout((4,2), (1,16))) test_right_inverse(@Layout((4,2), (1,5))) end end
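Two of the identities checked above are worth pulling out, since they state what composition and complement mean in this library (values copied directly from the tests):

```julia
using MoYe

# Composition: B picks coordinates inside A, so B's strides are scaled through A.
make_layout(20, 2) ∘ make_layout((4, 5), (1, 4)) == make_layout((4, 5), (2, 8))  # true

# Complement: the layout that covers the rest of a codomain of size 24.
complement(@Layout(4, 1), static(24)) == @Layout(6, 4)                           # true
```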
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
1556
using Test, SafeTestsets, CUDA @safetestset "Tuple Algorithms" begin include("tuple_alg.jl") end @safetestset "IntTuple" begin include("int_tuple.jl") end @safetestset "Stride" begin include("stride.jl") end @safetestset "Layout" begin include("layout.jl") end @safetestset "Static" begin include("static.jl") end @safetestset "Engine" begin include("engine.jl") end @testset "MoYeArray" begin @safetestset "MoYeArray" begin include("array.jl") end @safetestset "Broadcast" begin include("broadcast.jl") end end @safetestset "Tiling" begin include("tiling.jl") end @safetestset "Copy" begin include("copy.jl") end if CUDA.functional() @testset "Device" begin @safetestset "Memory" begin include("device/memory.jl") end @safetestset "MMA" begin include("device/mmaop.jl") end @safetestset "MMATraits" begin include("device/mmatraits.jl") end @safetestset "Pointer" begin include("device/pointer.jl") end @safetestset "LDMatrix" begin include("device/ldmatrix.jl") end @safetestset "Broadcast" begin include("device/broadcast.jl") end @safetestset "Tiled Copy" begin include("device/tiled_copy.jl") end @safetestset "MatMul" begin include("device/matmul.jl") end end end @testset "Host" begin @safetestset "CPU MatMul" begin include("host/tiling_matmul.jl") end @safetestset "Copy Async" begin include("host/copy_async.jl") end end @testset "Tiled MMA" begin @safetestset "Tiled MMA" begin include("tiled_mma.jl") end end
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
312
using MoYe, Test, Static using Static: One layout = make_layout(static((3, 4)), static((1, 2))) @test sizeof(layout) == 0 @test size(layout) isa StaticInt @test cosize(layout) isa StaticInt @test depth(layout) isa StaticInt @test layout(One()) isa StaticInt @test layout(static((1, 2))) isa StaticInt
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
2337
using MoYe, Test, JET @testset "Coordinate to Index" begin @testset "1-D Coordinate" begin @test coord_to_index(5, (3, 4), (2, 8)) == 11 @test coord_to_index(7, (3, 4), (2, 8)) == 17 @test coord_to_index(12, (3, 4), (2, 8)) == 29 @test_opt coord_to_index(5, (3, 4), (2, 8)) @test_opt coord_to_index(7, (3, 4), (2, 8)) @test_opt coord_to_index(12, (3, 4), (2, 8)) end @testset "h-D Coordinate" begin @test coord_to_index((2, 2), (3, 4), (2, 8)) == 11 @test coord_to_index((1, 3), (3, 4), (2, 8)) == 17 @test coord_to_index((3, 4), (3, 4), (2, 8)) == 29 @test_opt coord_to_index((2, 2), (3, 4), (2, 8)) @test_opt coord_to_index((1, 3), (3, 4), (2, 8)) @test_opt coord_to_index((3, 4), (3, 4), (2, 8)) end @testset "R-D Coordinate" begin @test coord_to_index((2, 3), (3, (2, 2)), (2, (16, 8))) == 11 @test coord_to_index((1, 2), (3, (2, 2)), (2, (16, 8))) == 17 @test coord_to_index((3, 4), (3, (2, 2)), (2, (16, 8))) == 29 @test_opt coord_to_index((2, 3), (3, (2, 2)), (2, (16, 8))) @test_opt coord_to_index((1, 2), (3, (2, 2)), (2, (16, 8))) @test_opt coord_to_index((3, 4), (3, (2, 2)), (2, (16, 8))) end @testset "Default Stride" begin #@test_throws DimensionMismatch coord_to_index(5, (3, 4)) @test coord_to_index((1, 2), (3, 4)) == 4 @test coord_to_index((2, 3), (3, 4)) == 8 @test coord_to_index((3, 4), (3, 4)) == 12 @test_opt coord_to_index((1, 2), (3, 4)) @test_opt coord_to_index((2, 3), (3, 4)) @test_opt coord_to_index((3, 4), (3, 4)) end end @testset "Index to Coord" begin @test index_to_coord(9, (3, 4), (1, 3)) == (3, 3) @test index_to_coord(10, (3, 4), (1, 3)) == (1, 4) @test_opt index_to_coord(9, (3, 4), (1, 3)) @test_opt index_to_coord(10, (3, 4), (1, 3)) @test_throws DimensionMismatch index_to_coord(9, (3, 4), (1, 3, 5)) end @testset "Default Major" begin @test compact_col_major(static((3, 4, 5))) === static((1, 3, 12)) @test compact_row_major(static((3, 4, 5))) === static((20, 5, 1)) @test_opt compact_col_major((3, 4, 5)) @test_opt compact_row_major((3, 4, 5)) end
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
1014
using MoYe, Test A_data = collect(1:48*8*7) B_data = collect(1:40*9*8) C_data = collect(1:48*7*40*9) A = MoYeArray(pointer(A_data), @Layout((48*7,8))) B = MoYeArray(B_data, @Layout((40*9,8))) C = MoYeArray(C_data, @Layout((48*7,40*9))) # Tile size tiled_mma = MoYe.make_tiled_mma(MMAOP_16x8x8_F16F16F16F16_TN(), @Layout((3,5))) @test tile_size(tiled_mma, _1) == 16 * 3 @test tile_size(tiled_mma, _2) == 8 * 5 @test tile_size(tiled_mma, _3) == 8 thr_mma = MoYe.get_slice(tiled_mma, 1) thr_C = partition_C(thr_mma, C) thr_A = partition_A(thr_mma, A) thr_B = partition_B(thr_mma, B) @test size(thr_C, 2) == 7 @test size(thr_C, 3) == 9 @test size(thr_A, 2) == 7 @test size(thr_A, 3) == 1 @test size(thr_B, 2) == 9 @test size(thr_B, 3) == 1 frag_C = partition_fragment_C(thr_mma, C) frag_A = partition_fragment_A(thr_mma, A) frag_B = partition_fragment_B(thr_mma, B) @test size(frag_C) == size(thr_C) @test size(frag_A) == size(thr_A) @test size(frag_B) == size(thr_B)
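The tile-size assertions follow directly from multiplying the 16x8x8 atom shape by the (3, 5) atom layout, and the per-thread partition extents then come from dividing the global shapes by that tile. Restated with the arithmetic visible (values as asserted above):

```julia
using MoYe

tiled_mma = MoYe.make_tiled_mma(MMAOP_16x8x8_F16F16F16F16_TN(), @Layout((3, 5)))

tile_size(tiled_mma, _1)  # 48 = 16 (atom M) * 3 atoms along M
tile_size(tiled_mma, _2)  # 40 =  8 (atom N) * 5 atoms along N
tile_size(tiled_mma, _3)  #  8 = atom K (the atom layout never tiles K)
# hence A's 48*7 rows split into 7 M-tiles and B's 40*9 rows into 9 N-tiles.
```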
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
142
using MoYe, Test data = [i for i in 1:48] GC.@preserve data begin a = MoYeArray(pointer(data), @Layout((6,8))) tile = @tile a (_3, _2) (1, :); @test rank(tile.layout) == 3 end
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
2527
using MoYe, Test, JET using MoYe: make_tuple @testset "Unflatten" begin @test unflatten((1, 2, 3), (0, 0, 0)) == (1, 2, 3) @test unflatten((1, 2, 3, 4), (0, (0, (0, 0)))) == (1, (2, (3, 4))) @test unflatten((1, 2, 3, 4), ((0, 0), (0, 0))) == ((1, 2), (3, 4)) @test unflatten((1, 2, 3, 4, 5), (0, (0, 0), 0)) == (1, (2, 3), 4) @test unflatten((1, 2, 3, 4, 5, 6), (0, (0, (0, (0, (0, 0)))))) == (1, (2, (3, (4, (5, 6))))) @test unflatten((), ()) == () end @testset "Front" begin @test MoYe.front(((2, 3), 4)) === 2 @test_opt MoYe.front(((2, 3), 4)) end @testset "Back" begin @test MoYe.back((2, (3, 4))) === 4 @test_opt MoYe.back(((2, 3), 4)) end @testset "Tuple Cat" begin @test MoYe.tuple_cat((1, 2), (3, 4)) === (1, 2, 3, 4) @test_opt MoYe.tuple_cat((1, 2), (3, 4)) end @testset "Insert" begin @test MoYe.insert((1, 2, 3), 10, 2) === (1, 10, 2, 3) @test_opt MoYe.insert((1, 2, 3), 10, 2) end @testset "Remove" begin @test MoYe.remove((1, 2, 3), _2) === (1, 3) @test_opt MoYe.remove((1, 2, 3), _2) end @testset "Replace" begin @test replace((1, 2, 3), 10, 2) === (1, 10, 3) @test_opt replace((1, 2, 3), 10, 2) end @testset "Unwrap" begin @test MoYe.unwrap(((1,))) === 1 @test_opt MoYe.unwrap(((1,))) end @testset "Append" begin @test MoYe.append((1, 2), 3) === (1, 2, 3) @test_opt MoYe.append((1, 2), 3) @test MoYe.append((1, 2), 3, _4) === (1, 2, 3, 3) @test_opt MoYe.append((1, 2), 3, _4) end @testset "Prepend" begin @test MoYe.prepend((1, 2), 3) === (3, 1, 2) @test_opt MoYe.prepend((1, 2), 3) @test MoYe.prepend((1, 2), 3, _4) === (3, 3, 1, 2) @test_opt MoYe.prepend((1, 2), 3, _4) end @testset "Exclusive scan" begin MoYe.escan(*, (1, 2, 3, 4, 5), 10) === (10, 10, 20, 60, 240) @test_opt MoYe.escan(*, (1, 2, 3, 4, 5), 10) end @testset "zip2_by" begin @test MoYe.zip2_by((('A', 'a'), (('B', 'b'), ('C', 'c')), 'd'), (0, (0, 0))) == (('A', ('B', 'C')), ('a', ('b', 'c'), 'd')) @test_opt MoYe.zip2_by((('A', 'a'), (('B', 'b'), ('C', 'c')), 'd'), (0, (0, 0))) end @testset "Make Tuple" begin t = typeof(static((1,2,3))) t2 = make_tuple(t) @test t2 === static((1,2,3)) t3 = typeof(static((1,2,(3,4)))) t4 = make_tuple(t3) @test t4 === static((1,2,(3,4))) t5 = typeof(static((1,2,(3,4),5))) t6 = make_tuple(t5) @test t6 === static((1,2,(3,4),5)) end
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
251
using MoYe, CUDA, Test function f() a = MoYeArray{Float64}(undef, @Layout((3,2))) fill!(a, one(eltype(a))) a .= a .* 2 @cushow sum(a) b = CUDA.exp.(a) @cushow sum(b) return nothing end @test_nowarn @cuda f()
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
3908
using Test, MoYe, CUDA @inline _getfirst(x::UInt32) = x @inline _getfirst(x) = getfield(x, 1) if MoYe.LLVM.version().major>=15 @testset "Compile to LLVM" begin function kernel(op) A = MoYeSharedArray(UInt32, @Layout((32,32))) a_frag = op(pointer(A, threadIdx().x*Int32(4))) @cushow Float32(_getfirst(a_frag)) return nothing end op_to_intrinsic = Dict(MoYe.ldmatrix_ops_list) for op_name in keys(op_to_intrinsic) op = @eval MoYe.$(Symbol(op_name)) buf = IOBuffer() @device_code_llvm io = buf @cuda launch=false kernel(op()) asm = String(take!(copy(buf))) @test occursin(op_to_intrinsic[op_name], asm) end end @testset "16x8x16_F16F16F16F16" begin function kernel(A,B,C, smemlayout_A, smemlayout_B, thread_layout) moye_A = MoYeArray(pointer(A), @Layout((16,16))) # M-major moye_B = MoYeArray(pointer(B), @Layout((16,8))) # K-major moye_C = MoYeArray(pointer(C.parent), @Layout((16, 8), (8, 1))) smem_A = MoYeSharedArray(Float16, smemlayout_A) # K-major smem_B = MoYeSharedArray(Float16, smemlayout_B) # K-major threadtile_A = @parallelize moye_A thread_layout threadIdx().x threadtile_B = @parallelize moye_B thread_layout threadIdx().x threadtile_smem_A = @parallelize smem_A thread_layout threadIdx().x threadtile_smem_B = @parallelize smem_B thread_layout threadIdx().x for i in eachindex(threadtile_A) threadtile_smem_A[i] = threadtile_A[i] end for i in eachindex(threadtile_B) threadtile_smem_B[i] = threadtile_B[i] end sync_threads() smem_B′ = MoYe.transpose(smem_B) # (N, K) (8, 16) frag_A = MoYeArray{Float16}(undef, @Layout((8,))) frag_B = MoYeArray{Float16}(undef, @Layout((4,))) frag_C = MoYeArray{Float16}(undef, @Layout((4,))) zeros!(frag_C) # loading from shared memory to registers ld_A = MoYe.LDSM_U32x4_N() ld_B = MoYe.LDSM_U32x2_N() recasted_smem_A = recast(UInt128, smem_A) # 16x2 recasted_smem_B = recast(UInt128, smem_B′) # 8x2 # or parallelize then recast copytile_smem_A = @parallelize recasted_smem_A @Layout((16, 2)) threadIdx().x copytile_smem_B = @parallelize recasted_smem_B @Layout((8, 2)) threadIdx().x recasted_frag_A = recast(UInt32, frag_A) recasted_frag_B = recast(UInt32, frag_B) copyto!(ld_A, recasted_frag_A, copytile_smem_A) copyto!(ld_B, recasted_frag_B, copytile_smem_B) # good syntax here traits = MoYe.MMATraits{MoYe.MMAOP_16x8x16_F16F16F16F16_TN}() @gc_preserve MoYe.mma_unpack!(traits, frag_C, frag_A, frag_B, frag_C) recasted_moye_C = recast(UInt32, moye_C) # 16x4 recasted_frag_C = recast(UInt32, frag_C) # 2x1 row, col = fldmod1(threadIdx().x, Int32(4)) # awkward manual indexing recasted_moye_C[row, col] = recasted_frag_C[1] recasted_moye_C[row+8, col] = recasted_frag_C[2] return nothing end smemlayout_A = @Layout((16, 16), (16, 1)) smemlayout_B = @Layout((16, 8), (1, 16)) thread_layout = @Layout (16, 2) A = CUDA.rand(Float16, 16, 16) B = CUDA.rand(Float16, 16, 8) C = transpose(CUDA.rand(Float16, 8, 16)) # row-major, this is awkward @cuda threads=32 kernel(A,B,C, smemlayout_A, smemlayout_B, thread_layout) CUDA.synchronize() @test A * B ≈ C end end
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
2181
using MoYe, CUDA, Test function matmul_kernel(A, sA_layout, tA, B, sB_layout, tB, C, tC) M = size(A, 1) N = size(B, 1) K = size(A, 2) bM = size(sA_layout, 1) bN = size(sB_layout, 1) bK = size(sB_layout, 2) sA = MoYeSharedArray(eltype(A), sA_layout) sB = MoYeSharedArray(eltype(B), sB_layout) mA = MoYeArray(A, (M, K)) mB = MoYeArray(B, (N, K)) mC = MoYeArray(C, (M, N)) gA = @tile mA (bM, bK) (blockIdx().x, :) gB = @tile mB (bN, bK) (blockIdx().y, :) gC = @tile mC (bM, bN) (blockIdx().x, blockIdx().y) # copy partition tAgA = @parallelize gA tA threadIdx().x tBgB = @parallelize gB tB threadIdx().x tAsA = @parallelize sA tA threadIdx().x tBsB = @parallelize sB tB threadIdx().x # mma partition tCsA = @parallelize sA tC threadIdx().x (1, :) tCsB = @parallelize sB tC threadIdx().x (:, 1) tCgC = @parallelize gC tC threadIdx().x # accumulator tCrC = similar(tCgC) zeros!(tCrC) for k in axes(tAgA, 3) copyto!(tAsA, view(tAgA, :, :, k)) copyto!(tBsB, view(tBgB, :, :, k)) cp_async_wait() sync_threads() @gc_preserve gemm!(tCrC, tCsA, tCsB, tCrC) sync_threads() end copyto!(tCgC, tCrC) return nothing end function matmul(A, B, C) bM = _128 bN = _128 bK = _8 sA_layout = make_layout((bM, bK), (_1, bM + _1)) sB_layout = make_layout((bN, bK), (_1, bN + _1)) tA = @Layout (32, 8) tB = @Layout (32, 8) tC = @Layout (16, 16) threads = Int(size(tC)) blocks = (cld(size(A, 1), bM), cld(size(B, 1), bN)) @cuda threads=threads blocks=blocks matmul_kernel(A, sA_layout, tA, B, sB_layout, tB, C, tC) end function test() A = CUDA.randn(Float32, 2048, 256) B = CUDA.randn(Float32, 2048, 256) C = CUDA.randn(Float32, 2048, 2048) matmul(A, B, C) CUDA.synchronize() @test C == A * B' CUDA.unsafe_free!(A) CUDA.unsafe_free!(B) CUDA.unsafe_free!(C) end test()
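The launcher derives its grid and block dimensions from the static layouts: one thread per entry of the `(16, 16)` `tC` layout and one block per `(bM, bN)` tile of `C`. For the sizes used in `test()` that works out as follows (plain arithmetic, shown only to make the mapping explicit):

```julia
# Launch geometry implied by matmul(A, B, C) for the 2048 x 2048 output in test():
bM, bN = 128, 128
M, N   = 2048, 2048

threads = 16 * 16                    # 256 threads, one per entry of the tC layout
blocks  = (cld(M, bM), cld(N, bN))   # (16, 16) blocks, one per (bM, bN) tile of C
```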
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
702
using MoYe, Test, CUDA using Static: One @testset "Global Memory" begin a = CUDA.rand(Float32, 10) a = CUDA.cudaconvert(a) MoYeArray(pointer(a), static((2, 5))) end @testset "Shared Memory" begin ptr = MoYe.SharedMemory(Float32, _10) MoYeArray(ptr, static((2, 5))) end @testset "Register Memory" begin @test_nowarn MoYeArray{Float32}(undef, static((2, 5))) a = CUDA.rand(Float32, 8, 16) a = CUDA.cudaconvert(a) gmem_8sx16d = MoYeArray(pointer(a), (_8, 16)) rmem = make_fragment_like(view(gmem_8sx16d, :, 1)) @test rmem.layout.shape == tuple(_8) @test rmem.layout.stride == tuple(One()) @test length(rmem.engine) == 8 end
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
1141
using Test, MoYe, CUDA @inline make_fragment(::Type{MoYe.Registers{T,S}}) where {T, S} = MoYeArray{T}(undef, (static(S),)) _float32(x::Number) = Float32(x) _float32(x::VecElement) = Float32(x.value) _one(::Type{NTuple{N, VecElement{T}}}) where {N, T} = ntuple(i -> VecElement(one(T)), Val(N)) _one(x) = one(x) @testset "Compile to LLVM" begin function kernel(mma_op) a_frag = make_fragment(mma_op.ARegisters) b_frag = make_fragment(mma_op.BRegisters) c_frag = make_fragment(mma_op.CRegisters) fill!(a_frag, _one(eltype(a_frag))) fill!(b_frag, _one(eltype(b_frag))) fill!(c_frag, _one(eltype(c_frag))) d_frag = mma_op(a_frag, b_frag, c_frag) @cushow _float32(getfield(d_frag,1)[1]) return end op_to_intrinsic = Dict(MoYe.mma_ops_list) for op_name in keys(op_to_intrinsic) op = @eval MoYe.$(Symbol(op_name)) buf = IOBuffer() @device_code_llvm io = buf @cuda threads=32 kernel(op()) asm = String(take!(copy(buf))) @test occursin(get(op_to_intrinsic, "$op", ""), asm) end end
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
3308
using Test, MoYe, CUDA @testset "Size of MMATraits" begin for mmaop in [MoYe.MMAOP_16x8x8_F16F16F16F16_TN, MoYe.MMAOP_16x8x8_F32F16F16F32_TN, MoYe.MMAOP_16x8x8_F32BF16BF16F32_TN, MoYe.MMAOP_16x8x16_F16F16F16F16_TN, MoYe.MMAOP_16x8x16_F32F16F16F32_TN, MoYe.MMAOP_16x8x16_F32BF16BF16F32_TN] @test sizeof(MoYe.MMATraits{mmaop}()) == 0 end end @testset "16x8x16_F16F16F16F16" begin function kernel(A,B,C, smemlayout_A, smemlayout_B, thread_layout) moye_A = MoYeArray(pointer(A), @Layout((16,16))) # M-major moye_B = MoYeArray(pointer(B), @Layout((16,8))) # K-major moye_C = MoYeArray(pointer(C.parent), @Layout((16, 8), (8, 1))) smem_A = MoYeSharedArray(Float16, smemlayout_A) # K-major smem_B = MoYeSharedArray(Float16, smemlayout_B) # K-major threadtile_A = @parallelize moye_A thread_layout threadIdx().x threadtile_B = @parallelize moye_B thread_layout threadIdx().x threadtile_smem_A = @parallelize smem_A thread_layout threadIdx().x threadtile_smem_B = @parallelize smem_B thread_layout threadIdx().x for i in eachindex(threadtile_A) threadtile_smem_A[i] = threadtile_A[i] end for i in eachindex(threadtile_B) threadtile_smem_B[i] = threadtile_B[i] end sync_threads() smem_B′ = MoYe.transpose(smem_B) # (N, K) (8, 16) frag_A = MoYeArray{Float16}(undef, @Layout((8,))) frag_B = MoYeArray{Float16}(undef, @Layout((4,))) frag_C = MoYeArray{Float16}(undef, @Layout((4,))) zeros!(frag_C) # loading from shared memory to registers ld_A = MoYe.LDSM_U32x4_N() ld_B = MoYe.LDSM_U32x2_N() recasted_smem_A = recast(UInt128, smem_A) # 16x2 recasted_smem_B = recast(UInt128, smem_B′) # 8x2 # or parallelize then recast copytile_smem_A = @parallelize recasted_smem_A @Layout((16, 2)) threadIdx().x copytile_smem_B = @parallelize recasted_smem_B @Layout((8, 2)) threadIdx().x recasted_frag_A = recast(UInt32, frag_A) recasted_frag_B = recast(UInt32, frag_B) copyto!(ld_A, recasted_frag_A, copytile_smem_A) copyto!(ld_B, recasted_frag_B, copytile_smem_B) # good syntax here traits = MoYe.MMATraits{MoYe.MMAOP_16x8x16_F16F16F16F16_TN}() @gc_preserve MoYe.mma_unpack!(traits, frag_C, frag_A, frag_B, frag_C) recasted_moye_C = recast(UInt32, moye_C) # 16x4 recasted_frag_C = recast(UInt32, frag_C) # 2x1 row, col = fldmod1(threadIdx().x, Int32(4)) # awkward manual indexing recasted_moye_C[row, col] = recasted_frag_C[1] recasted_moye_C[row+8, col] = recasted_frag_C[2] return nothing end smemlayout_A = @Layout((16, 16), (16, 1)) smemlayout_B = @Layout((16, 8), (1, 16)) thread_layout = @Layout (16, 2) A = CUDA.rand(Float16, 16, 16) B = CUDA.rand(Float16, 16, 8) C = transpose(CUDA.rand(Float16, 8, 16)) # row-major, this is awkward @cuda threads=32 kernel(A,B,C, smemlayout_A, smemlayout_B, thread_layout) CUDA.synchronize() @test Array(A*B) ≈ Array(C) end
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
303
using MoYe, Test, CUDA p = reinterpret(Core.LLVMPtr{Int32, AS.Global}, 0) p2 = reinterpret(Core.LLVMPtr{Int32, AS.Shared}, 0) x = MoYeArray(p, @Layout(3)); x2 = MoYeArray(p2, @Layout(3)); @test isgmem(x) == true @test isgmem(x2) == false @test issmem(x) == false @test issmem(x2) == true
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
3377
using CUDA, MoYe, Test function tiled_copy_kernel(g_in, g_out, tiled_copy, smem_layout) t_g_in = MoYeArray(pointer(g_in), smem_layout) t_g_out = MoYeArray(pointer(g_out), smem_layout) t_smem=MoYeSharedArray(UInt16, smem_layout) tid=threadIdx().x for i in tid:size(tiled_copy):size(t_smem.layout) @inbounds t_smem[i] = t_g_in[i] end thr_copy = get_slice(tiled_copy, tid) tXsX = partition_S(thr_copy, t_smem) tXgX = partition_D(thr_copy, t_g_out) tXrX = MoYeArray{UInt16}(undef, tXgX.layout.shape) # smem to rmem copyto!(tiled_copy, tXrX, tXsX) # rmem to gmem copyto!(tXgX, tXrX) @inbounds tXrX.engine[1] # compiler bug, have to load return nothing end @testset "UniversalCopy" begin @testset "32 x 32" begin g_in = [UInt16(i) for i in 1:32*32] g_out = zeros(UInt16, 32*32) cu_g_in = CuArray(g_in) cu_g_out = CuArray(g_out) smem_layout = @Layout (32,32) (1,32) tiled_copy = make_tiled_copy(MoYe.CopyAtom{MoYe.UniversalCopy{UInt16, UInt16}, UInt16}(), @Layout((16,2)), @Layout((2,4))) @cuda threads=32 tiled_copy_kernel(cu_g_in, cu_g_out, tiled_copy, smem_layout) @test cu_g_out == cu_g_in end @testset "32 x 8" begin g_in = [UInt16(i) for i in 1:32*8] g_out = zeros(UInt16, 32*8) cu_g_in = CuArray(g_in) cu_g_out = CuArray(g_out) smem_layout = @Layout (32, (2, 4)) (2, (1, 64)) tiled_copy = make_tiled_copy(MoYe.CopyAtom{MoYe.UniversalCopy{UInt16, UInt16}, UInt16}(), @Layout((32,1)), @Layout((1,8))) @cuda threads=32 tiled_copy_kernel(cu_g_in, cu_g_out, tiled_copy, smem_layout) @test cu_g_out == cu_g_in end end @testset "LDMATRIX" begin @testset "32 x 32" begin g_in = [UInt16(i) for i in 1:32*32] g_out = zeros(UInt16, 32*32) smem_layout = @Layout (32,32) (1,32) cu_g_in = CuArray(g_in) cu_g_out = CuArray(g_out) for ldmatrix in [:LDSM_U32x1_N, :LDSM_U32x2_N, :LDSM_U32x4_N] @testset "$ldmatrix" begin @eval tiled_copy = make_tiled_copy(MoYe.CopyAtom{$ldmatrix, UInt16}(), @Layout((16,2)), @Layout((2,4))) @cuda threads=32 tiled_copy_kernel(cu_g_in, cu_g_out, tiled_copy, smem_layout) @test cu_g_out == cu_g_in fill!(cu_g_out, zero(UInt16)) end end end @testset "32 x 8" begin g_in = [UInt16(i) for i in 1:32*8] g_out = zeros(UInt16, 32*8) cu_g_in = CuArray(g_in) cu_g_out = CuArray(g_out) smem_layout = @Layout (32, (2, 4)) (2, (1, 64)) for ldmatrix in [:LDSM_U32x1_N, :LDSM_U32x2_N, :LDSM_U32x4_N] @testset "$ldmatrix" begin @eval tiled_copy = make_tiled_copy(MoYe.CopyAtom{$ldmatrix, UInt16}(), @Layout((32,1)), @Layout((1,8))) @cuda threads=32 tiled_copy_kernel(cu_g_in, cu_g_out, tiled_copy, smem_layout) @test cu_g_out == cu_g_in fill!(cu_g_out, zero(UInt16)) end end end end
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
2980
using MoYe, Test, CUDA function copy_kernel(dest, src, smemlayout, blocklayout, threadlayout) smem = MoYe.SharedMemory(eltype(dest), cosize(smemlayout)) moye_smem = MoYeArray(smem, smemlayout) moye_dest = MoYeArray(dest) moye_src = MoYeArray(src) bM = size(blocklayout, 1) bN = size(blocklayout, 2) blocktile_dest = @tile moye_dest (bM, bN) (blockIdx().x, blockIdx().y) blocktile_src = @tile moye_src (bM, bN) (blockIdx().x, blockIdx().y) threadtile_dest = @parallelize blocktile_dest threadlayout threadIdx().x threadtile_src = @parallelize blocktile_src threadlayout threadIdx().x threadtile_smem = @parallelize moye_smem threadlayout threadIdx().x copyto!(threadtile_smem, threadtile_src) cp_async_wait() copyto!(threadtile_dest, threadtile_smem) return nothing end function test_copy_async(M, N) a = CUDA.rand(Float32, M, N) b = CUDA.rand(Float32, M, N) blocklayout = @Layout (32, 32) # 32 * 32 elements in a block smemlayout = @Layout (32, 32) # 32 * 32 elements in shared memory threadlayout = @Layout (32, 8) # 32 * 8 threads in a block bM = size(blocklayout, 1) bN = size(blocklayout, 2) blocks = (cld(M, bM), cld(N, bN)) threads = MoYe.dynamic(size(threadlayout)) @cuda blocks=blocks threads=threads copy_kernel(a, b, smemlayout, blocklayout, threadlayout) CUDA.synchronize() @test a == b end if CUDA.functional() test_copy_async(2048, 2048) end function transpose_kernel(dest, src, smemlayout, blocklayout, threadlayout) moye_smem = MoYeSharedArray(eltype(dest), smemlayout) moye_src = MoYeArray(src) moye_dest = MoYeArray(dest) bM = size(blocklayout, 1) bN = size(blocklayout, 2) blocktile_src = @tile moye_src (bM, bN) (blockIdx().x, blockIdx().y) blocktile_dest = @tile moye_dest (bN, bM) (blockIdx().y, blockIdx().x) threadtile_dest = @parallelize blocktile_dest threadlayout threadIdx().x threadtile_src = @parallelize blocktile_src threadlayout threadIdx().x threadtile_smem = @parallelize moye_smem threadlayout threadIdx().x copyto!(threadtile_smem, threadtile_src) cp_async_wait() sync_threads() moye_smem′ = MoYe.transpose(moye_smem) threadtile_smem′ = @parallelize moye_smem′ threadlayout threadIdx().x copyto!(threadtile_dest, threadtile_smem′) return nothing end function test_transpose(M, N) a = CUDA.rand(Float32, M, N) b = CUDA.rand(Float32, N, M) blocklayout = @Layout (32, 32) smemlayout = @Layout (32, 32) (1, 33) threadlayout = @Layout (32, 8) bM = size(blocklayout, 1) bN = size(blocklayout, 2) blocks = (cld(M, bM), cld(N, bN)) threads = MoYe.dynamic(size(threadlayout)) @cuda blocks=blocks threads=threads transpose_kernel(a, b, smemlayout, blocklayout, threadlayout) CUDA.synchronize() @test a == transpose(b) end if CUDA.functional() test_transpose(2048, 2048) end
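Note the shared-memory layout used by the transpose test: the plain copy test stores the 32x32 tile densely, but `test_transpose` pads the column stride to 33, presumably so that reading the transposed view does not step through shared memory in multiples of 32 words and hit the same bank repeatedly. A side-by-side sketch of the two layouts:

```julia
using MoYe

dense  = @Layout (32, 32)          # column stride 32: a transposed read steps by 32 words
padded = @Layout (32, 32) (1, 33)  # column stride 33: successive reads land in different banks
```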
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
code
1068
using MoYe, Test # C = A * B^T M = 4*32 N = 4*32 K = 8*32 A = rand(M, K) B = rand(N, K) C = zeros(M, N) threadlayout = @Layout (4, 4) moye_A = MoYeArray(pointer(A), (M, K)) moye_B = MoYeArray(pointer(B), (N, K)) moye_C = MoYeArray(pointer(C), (M, N)) tile_A = @parallelize moye_A threadlayout 1 (_1, :) tile_B = @parallelize moye_B threadlayout 1 (:, _1) GC.@preserve A B C begin moye_A = MoYeArray(pointer(A), (M, K)) moye_B = MoYeArray(pointer(B), (N, K)) moye_C = MoYeArray(pointer(C), (M, N)) Threads.@threads :static for i in 1:Threads.nthreads() tile_A = @parallelize moye_A threadlayout Threads.threadid() (_1, :) tile_B = @parallelize moye_B threadlayout Threads.threadid() (:, _1) tile_C = @parallelize moye_C threadlayout Threads.threadid() for k in 1:size(tile_A, 2) for m in 1:size(tile_C, 1) for n in 1:size(tile_C, 2) tile_C[m, n] += tile_A[m, k] * tile_B[n, k] end end end end end @test C ≈ A * transpose(B)
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
docs
959
# MoYe [![Stable](https://img.shields.io/badge/docs-stable-blue.svg)](https://YichengDWu.github.io/MoYe.jl/stable/) [![Dev](https://img.shields.io/badge/docs-dev-blue.svg)](https://YichengDWu.github.io/MoYe.jl/dev/) [![Build Status](https://github.com/YichengDWu/MoYe.jl/actions/workflows/CI.yml/badge.svg?branch=main)](https://github.com/YichengDWu/MoYe.jl/actions/workflows/CI.yml?query=branch%3Amain) [![Coverage](https://codecov.io/gh/YichengDWu/MoYe.jl/branch/main/graph/badge.svg)](https://codecov.io/gh/YichengDWu/MoYe.jl) `MoYe.jl` is NVIDIA's [Cutlass/CuTe](https://github.com/NVIDIA/cutlass/blob/main/) implemented in Julia. The primary purpose of developing this library is my desire to learn CuTe. The name **Mo Ye** is derived from an ancient Chinese [legend of swordsmiths](https://en.wikipedia.org/wiki/Gan_Jiang_and_Mo_Ye). The documentation is mostly my learning notes. Please refer to CuTe's documentation for more details.
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
docs
2147
```@meta CurrentModule = MoYe ``` # MoYe Documentation for [MoYe](https://github.com/YichengDWu/MoYe.jl). ## Quick Start ```julia julia> data = [i for i in 1:48]; julia> a = MoYeArray(data, @Layout((6,8))) 6×8 MoYeArray{Int64, 2, ViewEngine{Int64, Ptr{Int64}}, Layout{2, Tuple{Static.StaticInt{6}, Static.StaticInt{8}}, Tuple{Static.StaticInt{1}, Static.StaticInt{6}}}} with indices _1:_6×_1:_8: 1 7 13 19 25 31 37 43 2 8 14 20 26 32 38 44 3 9 15 21 27 33 39 45 4 10 16 22 28 34 40 46 5 11 17 23 29 35 41 47 6 12 18 24 30 36 42 48 julia> subtile_a = @tile a (_3, _4) (1, 2) 3×4 MoYeArray{Int64, 2, ViewEngine{Int64, Ptr{Int64}}, Layout{2, Tuple{Static.StaticInt{3}, Static.StaticInt{4}}, Tuple{Static.StaticInt{1}, Static.StaticInt{6}}}} with indices _1:_3×_1:_4: 25 31 37 43 26 32 38 44 27 33 39 45 julia> workitems_a = @parallelize subtile_a (_3, _2) (1, 1) 1×2 MoYeArray{Int64, 2, ViewEngine{Int64, Ptr{Int64}}, Layout{2, Tuple{Static.StaticInt{1}, Static.StaticInt{2}}, Tuple{Static.StaticInt{0}, Static.StaticInt{12}}}} with indices _1:_1×_1:_2: 25 37 julia> for i in eachindex(workitems_a) workitems_a[i] = 0 end julia> a 6×8 MoYeArray{Int64, 2, ViewEngine{Int64, Ptr{Int64}}, Layout{2, Tuple{Static.StaticInt{6}, Static.StaticInt{8}}, Tuple{Static.StaticInt{1}, Static.StaticInt{6}}}} with indices _1:_6×_1:_8: 1 7 13 19 0 31 0 43 2 8 14 20 26 32 38 44 3 9 15 21 27 33 39 45 4 10 16 22 28 34 40 46 5 11 17 23 29 35 41 47 6 12 18 24 30 36 42 48 julia> @tile subtile_a (_3, _1) (1, 2) 3×1 MoYeArray{Int64, 2, ViewEngine{Int64, Ptr{Int64}}, Layout{2, Tuple{Static.StaticInt{3}, Static.StaticInt{1}}, Tuple{Static.StaticInt{1}, Static.StaticInt{0}}}} with indices _1:_3×_1:_1: 31 32 33 ``` ## Tile Iterator ```@repl using MoYe data = collect(1:36); A = MoYeArray(data, @Layout((4,9))) tiled_A = zipped_divide(A, (@Layout(2), @Layout(3))) # 2 × 3 tile for i in axes(tiled_A, 2) @show view(tiled_A, :, i) end ```
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
docs
176
# MoYeArray ```@meta CurrentModule = MoYe ``` ## Index ```@index Pages = ["array.md"] ``` ```@docs ViewEngine ArrayEngine MoYeArray recast zeros! ```
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
docs
151
# MMA/Copy Atom ```@meta CurrentModule = MoYe ``` ## Index ```@index Pages = ["atom.md"] ``` ```@docs make_tiled_mma make_tiled_copy print_typst ```
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
docs
271
# Data Movement ```@meta CurrentModule = MoYe ``` ## Index ```@index Pages = ["copy.md"] ``` ```@docs copyto!(dest::MoYeArray, src::MoYeArray) copyto!(ldmatrix::AbstractLdMatrix, dest::MoYeArray, src::MoYeArray) cp_async_wait cp_async_commit ```
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
docs
831
# Layout ```@meta CurrentModule = MoYe ``` ## Index ```@index Pages = ["layout.md"] ``` ## Constructors ```@docs Layout @Layout make_layout ``` ## Fundamentals ```@docs size(::Layout) rank(::Layout) depth(::Layout) cosize(::Layout) getindex(layout::Layout, Is::IntType...) ``` ## Compact Layout ```@docs GenColMajor GenRowMajor ``` ## Algebra ### Concatenation ```@docs cat(::Layout...) make_layout(::Layout...) ``` ### Composition ```@docs composition ``` ### Complement ```@docs complement ``` ### Inverse ```@docs left_inverse right_inverse ``` ### Product ```@docs logical_product blocked_product raked_product ``` ### Division ```@docs logical_divide zipped_divide tiled_divide ``` ### Miscellaneous ```@docs coalesce flatten(layout::Layout) ```
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
docs
124
```@meta CurrentModule = MoYe ``` ## Index ```@index Pages = ["tiling.md"] ``` ```@docs @tile @parallelize ```
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
docs
1264
# MoYeArray [`MoYeArray`](@ref) benefits from the [`Layout`](@ref) to be able to create all kinds of special arrays. For example, we can create a `FillArray`-like array ```@repl array using MoYe MoYeArray{Float64}(one, @Layout((3,4), (0, 0))) ans.engine ``` As you can see, the array only contains one element. Under the hood, the physical length of that array is calculated by [`cosize`](@ref): ```@repl array cosize(@Layout((3,4), (0, 0))) ``` The underlying implementation of MoYeArray determines that linear indexing is actually periodic. ```@repl array function f() B = MoYeArray([1,2,3], @Layout((3,), (1,))) @show @inbounds B[4], B[5], B[6], B[7] end f(); ``` We can also easily create a so-called `BlockArray`: ```@repl array data = collect(1:48); B=MoYeArray(data, @Layout(((2,3), (2,4)), ((1, 16), (2, 4)))) ``` Here we created a 2x3 block array with 2x4 blocks. The first mode is the index of the block, the second mode is the index within the block. ## Slicing It is **required** to use the syntax `view(a, ids...)` or `@view a[ids...]`, depending on your taste. ```@repl array data = [i for i in 1:164]; a = MoYeArray(data, ((_3, 2), (2, _5, _2)), ((4,1), (_2, 13, 100))) b = @view a[2, :] ```
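As a quick sanity check of the physical sizes above, the following plain-Julia sketch (not MoYe API) evaluates the closed form `1 + sum((shape .- 1) .* stride)`, i.e. the index of the last logical element of a flat shape/stride layout:

```julia
# Closed-form physical extent of a flat shape/stride layout (used here for
# illustration only; `cosize` is the authoritative computation in MoYe).
naive_cosize(shape, stride) = 1 + sum((shape .- 1) .* stride)

naive_cosize((3, 4), (0, 0))               # 1  -- the FillArray-like layout above
naive_cosize((2, 3, 2, 4), (1, 16, 2, 4))  # 48 -- the flattened BlockArray layout above
```

These match the one-element backing store of the `FillArray`-like example and the 48-element `data` vector behind the `BlockArray`.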
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
docs
4208
## Memcpy Async With the NVIDIA Ampere architecture, you can asynchronously copy data between GPU global memory and shared memory and not tie up threads to shepherd data movement. To utilize this feature, we simply change the `TiledCopy` to the following ```julia copy_A = make_tiled_copy(CopyAtom{CPOP_ASYNC_CACHEALWAYS{TA}, TA}(), @Layout((32, 8)), @Layout((1, 1))) copy_B = make_tiled_copy(CopyAtom{CPOP_ASYNC_CACHEALWAYS{TB}, TB}(), @Layout((32, 8)), @Layout((1, 1))) ``` The updated kernel function. ```julia function matmul_kernel(A, sA_layout, copy_A, B, sB_layout, copy_B, C, mma_C) sA = MoYeSharedArray(eltype(A), sA_layout) sB = MoYeSharedArray(eltype(B), sB_layout) mA = MoYeArray(A) mB = MoYeArray(B) mC = MoYeArray(C) bM = size(sA_layout, 1) bN = size(sB_layout, 1) bK = size(sB_layout, 2) gA = @tile mA (bM, bK) (blockIdx().x, :) gB = @tile mB (bN, bK) (blockIdx().y, :) gC = @tile mC (bM, bN) (blockIdx().x, blockIdx().y) # copy partition thr_copy_a = get_slice(copy_A, threadIdx().x) tAgA = partition_S(thr_copy_a, gA) # (CPY, CPY_M, CPY_K, k) tAsA = partition_D(thr_copy_a, sA) # (CPY, CPY_M, CPY_K) thr_copy_b = get_slice(copy_B, threadIdx().x) tBgB = partition_S(thr_copy_b, gB) # (CPY, CPY_N, CPY_K, k) tBsB = partition_D(thr_copy_b, sB) # (CPY, CPY_N, CPY_K) # mma partition thr_mma = get_slice(mma_C, threadIdx().x) tCsA = partition_A(thr_mma, sA) # (MMA, MMA_M, MMA_K) tCsB = partition_B(thr_mma, sB) # (MMA, MMA_M, MMA_K) tCgC = partition_C(thr_mma, gC) # (MMA, MMA_M, MMA_N) # accumulator tCrC = make_fragment_C(thr_mma, tCgC) zeros!(tCrC) for k in axes(tAgA, 4) copyto!(copy_A, tAsA, view(tAgA, :, :, :, k)) copyto!(copy_B, tBsB, view(tBgB, :, :, :, k)) cp_async_wait() @gc_preserve gemm!(mma_C, tCrC, tCsA, tCsB, tCrC) sync_threads() end copyto!(tCgC, tCrC) return nothing end function matmul(A, B, C) bM = _128 bN = _128 bK = _8 sA_layout = make_layout((bM, bK), (_1, bM + _1)) sB_layout = make_layout((bN, bK), (_1, bN + _1)) TA = eltype(A) TB = eltype(B) TC = eltype(C) copy_A = make_tiled_copy(CopyAtom{CPOP_ASYNC_CACHEALWAYS{TA}, TA}(), @Layout((32, 8)), @Layout((1, 1))) copy_B = make_tiled_copy(CopyAtom{CPOP_ASYNC_CACHEALWAYS{TB}, TB}(), @Layout((32, 8)), @Layout((1, 1))) mma_C = make_tiled_mma(UniversalFMA{TA,TB, TC}(), # MMA operation @Layout((32,8))) # Atom layout threads = Int(size(mma_C)) blocks = (cld(size(A, 1), bM), cld(size(B, 1), bN)) @cuda threads=threads blocks=blocks matmul_kernel(A, sA_layout, copy_A, B, sB_layout, copy_B, C, mma_C) end function test() A = CUDA.randn(Float32, 2048, 256) B = CUDA.randn(Float32, 2048, 256) C = CUDA.randn(Float32, 2048, 2048) matmul(A, B, C) CUDA.synchronize() @test C == A * B' CUDA.unsafe_free!(A) CUDA.unsafe_free!(B) CUDA.unsafe_free!(C) end test() ``` ## Vectorized copy We can change `CPOP_ASYNC_CACHEALWAYS{TA}/CPOP_ASYNC_CACHEALWAYS{TB}` to `CPOP_ASYNC_CACHEALWAYS{Float64}` to enable vectorized copies from global memory to shared memory. However, doing so will resul in a memory misaligned error. This is because we have padded `sA` and `sB` by one row. The element at `[1,2]` is not aligned to 8 bytes as required by the copy_async instruction, hence the error. We also need the following changes ```julia sA_layout = make_layout((bM, bK), (_1, bM + _2)) sB_layout = make_layout((bN, bK), (_1, bN + _2)) ```
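To see why the one-row padding breaks the vectorized copy, here is a quick back-of-the-envelope check in plain Julia (not MoYe API); it assumes `Float32` elements and `bM = bN = 128` as in the example above:

```julia
# Byte offset of the first element of the second column, i.e. sA[1, 2],
# for a column-major layout padded by `pad` rows.
bM = 128
col_offset_bytes(pad) = (bM + pad) * sizeof(Float32)

col_offset_bytes(1) % 8  # 516 % 8 == 4 -> not 8-byte aligned, the async copy faults
col_offset_bytes(2) % 8  # 520 % 8 == 0 -> aligned, hence the `bM + _2` padding
```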
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
docs
1204
# Broadcasting Broadcasting is only defined for [`MoYeArray`](@ref)s with static sizes. In-place broadcasting preserves the original layout. Out-of-place broadcasting always returns an owning array of a compact layout with the same shape and the stride ordered the same. ```@repl bc using MoYe a = MoYeArray{Float64}(undef, @Layout((3,2), (2,1))) fill!(a, 1.0); a .* 3 a .+ a ``` ```@repl bc b = MoYeArray{Float64}(undef, @Layout((3,), (2,))) |> zeros!; # Create a vector a .- b ``` ## On GPU (In-place) broadcasting on device should just work: ```julia julia> function f() a = MoYeArray{Float64}(undef, @Layout((3,2))) fill!(a, one(eltype(a))) a .= a .* 2 @cushow sum(a) b = CUDA.exp.(a) @cushow sum(b) return nothing end f (generic function with 1 method) julia> @cuda f() sum(a) = 12.000000 sum(b) = 44.334337 CUDA.HostKernel{typeof(f), Tuple{}}(f, CuFunction(Ptr{CUDA.CUfunc_st} @0x0000026e00ca1af0, CuModule(Ptr{CUDA.CUmod_st} @0x0000026e15cfc900, CuContext(0x0000026da1fff8b0, instance e5a1871b578f5adb))), CUDA.KernelState(Ptr{Nothing} @0x0000000204e00000)) ```
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
docs
4920
# Layout Mathematically, a `Layout` represents a function that maps a logical coordinate to a 1-D index space that can be used to index into an array. It consists of a `shape` and a `stride`, wherein the `shape` determines the domain, and the `stride` establishes the mapping through an inner product. `shape` and `stride` are both defined by (recursive) tuples of integers. For example, we can construct a vector with stride 2 ```@repl layout using MoYe struct StrideVector data layout end Base.getindex(x::StrideVector, i) = x.data[x.layout(i)] a = StrideVector(collect(1:8), Layout(4, 2)) @show a[1] a[2] a[3] a[4]; ``` ## Fundamentals ```@repl layout using MoYe layout_2x4 = Layout((2, (2, 2)), (4, (1, 2))) print("Shape: ", shape(layout_2x4)) print("Stride: ", stride(layout_2x4)) print("Size: ", size(layout_2x4)) # the domain is (1,2,...,8) print("Rank: ", rank(layout_2x4)) print("Depth: ", depth(layout_2x4)) print("Cosize: ", cosize(layout_2x4)) layout_2x4 # this can be viewed as a row-major matrix ``` ### Compile-time-ness of values You can also use static integers: ```@repl layout static_layout = @Layout (2, (2, 2)) (4, (1, 2)) typeof(static_layout) sizeof(static_layout) ``` #### Different results from static Layout vs dynamic Layout It is expected to get results that **appears** to be different when the layout is static or dynamic. For example, ```@repl layout layout = @Layout (2, (1, 6)) (1, (6, 2)) print(coalesce(layout)) ``` is different from ```@repl layout layout = Layout((2, (1, 6)), (1, (6, 2))) print(coalesce(layout)) ``` But they **are** mathematically equivalent. Static information allows us to simplify the result as much as possible, whereas dynamic layouts result in dynamic checking hence type instability. ## Coordinate space The coordinate space of a `Layout` is determined by its `Shape`. This coordinate space can be viewed in three different ways: 1. h-D coordinate space: Each element in this space possesses the exact hierarchical structure as defined by the Shape. Here `h` stands for "hierarchical". 2. 1-D coordinate space: This can be visualized as the colexicographically flattening of the coordinate space into a one-dimensional space. 3. R-D coordinate space: In this space, each element has the same rank as the Shape, but each mode (top-level axis) of the `Shape` is colexicographically flattened into a one-dimensional space. Here `R` stands for the rank of the layout. ```@repl layout layout_2x4(2, (1, 2)) # h-D coordinate layout_2x4(2, 3) # R-D coordinate layout_2x4(6) # 1-D coordinate ``` ## Layout Algebra ### Concatenation A `layout` can be expressed as the concatenation of its sublayouts. ```@repl layout layout_2x4[2] # get the second sublayout tuple(layout_2x4...) # splatting a layout into sublayouts make_layout(layout_2x4...) # concatenating sublayouts for sublayout in layout_2x4 # iterating a layout @show sublayout end ``` ### Complement Let's assume that we are dealing with a vector of 24 elements. Our goal is to partition this vector into six tiles, each consisting of four elements, following a specific pattern: gather every 4 elements at even indices to a tile. This operation creates a new layout where we collect every second element until we have four elements, and then repeat this process for the rest of the vector. 
The resulting layout would resemble: ```julia 1 2 3 4 5 6 +----+----+----+----+----+----+ 1 | 1 | 2 | 9 | 10 | 17 | 18 | +----+----+----+----+----+----+ 2 | 3 | 4 | 11 | 12 | 19 | 20 | +----+----+----+----+----+----+ 3 | 5 | 6 | 13 | 14 | 21 | 22 | +----+----+----+----+----+----+ 4 | 7 | 8 | 15 | 16 | 23 | 24 | +----+----+----+----+----+----+ ``` `complement` computes the first row of this new layout. ```@repl layout print_layout(complement(@Layout(4,2), 24)) ``` The layout `Layout(4,2)` and it complement gives us the desired new layout. ```@repl layout print_layout(make_layout(@Layout(4, 2),complement(@Layout(4, 2), 24))) ``` ### Product #### Logical product ```@repl layout tile = @Layout((2,2), (1,2)); print_layout(tile) matrix_of_tiles = @Layout((3,4), (4,1)); print_layout(matrix_of_tiles) print_layout(logical_product(tile, matrix_of_tiles)) ``` #### Blocked product ```@repl layout print_layout(blocked_product(tile, matrix_of_tiles)) ``` #### Raked product ```@repl layout print_layout(raked_product(tile, matrix_of_tiles)) ``` ### Division #### Logical division ```@repl layout raked_prod = raked_product(tile, matrix_of_tiles); subtile = (Layout(2,3), Layout(2,4)); print_layout(logical_divide(raked_prod, subtile)) ``` #### Zipped division ```@repl layout print_layout(zipped_divide(raked_prod, subtile)) ```
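To make the inner-product mapping described at the top of this page concrete, here is a tiny stand-in written in plain Julia (not the MoYe implementation); it assumes an already-flattened shape/stride and 1-based indexing:

```julia
# A coordinate maps to a linear index through an inner product with the stride.
layout_index(coord, stride) = 1 + sum((coord .- 1) .* stride)

layout_index((2,), (2,))            # 3 -- matches a[2] == 3 for Layout(4, 2) above
layout_index((2, 1, 2), (4, 1, 2))  # 7 -- layout_2x4 at the h-D coordinate (2, (1, 2))
```

The 1-D and R-D coordinates are first converted (colexicographically) to this flattened h-D form before the same inner product is applied.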
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
docs
7509
# MatMul ![matmul](../assets/matmul.png) In this tutorial, we explore matrix multiplication using MoYe.jl, specifically computing the product $C = A * B^\top$. Here, matrix $A$ has dimensions $(M, K)$, matrix $B$ has dimensions $(K, N)$, and the resulting matrix $C$ will have dimensions $(M, N)$. First, we divide the task among each block. We use a tile of size (bM, bN) to partition C, with each block responsible for computing one tile. The tile's index is determined by (blockIdx().x, blockIdx().y). Computing a tile requires all values from A of shape (bM, K) and B of shape (bN, K). To reduce global memory access (since A, B, and C are stored in global memory), we further partition A and B along the K dimension, sequentially loading elements of sizes (bM, bK) and (bN, bK) into shared memory, then performing the matrix multiplication and accumulating the results into the tile of C. The partition of the global memory corresponds to the following three lines of code: ```julia gC = @tile C (bM, bN) (blockIdx().x, blockIdx().y) # (bM, bN) gA = @tile A (bM, bK) (blockIdx().x, :) # (bM, bK, K/bK) gB = @tile B (bN, bK) (blockIdx().y, :) # (bN, bK, K/bK) ``` For the specific partition syntax, please refer to [`@tile`](@ref). Here, `gA` refers to the block's tiles of `A` in global memory, which will be streamed through shared memory. Next, we use a for loop to index-slice the last dimension of gA and gB (denoted as `k`), loading them into shared memory. The code for this step is: ```julia sA = MoYeSharedArray(eltype(gA), sA_layout) # (bM, bK) sB = MoYeSharedArray(eltype(gB), sB_layout) # (bN, bK) ``` `MoYeSharedArray` automatically allocates shared memory of size `cosize(sA_layout) + cosize(sB_layout)` and returns a `MoYeArray`. We will explain how to define the layouts for sA and sB later; for now, it's only necessary to know that they are predefined at compile time. We then need to define how thread groups collectively copy from global to shared memory. There are many ways to organize threads, which will be discussed later, such as: ```julia tA = @Layout (32, 8) tB = @Layout (32, 8) ``` This implies that there are 32x8 threads arranged in a column-major format. Next, we use them to partition the arrays: ```julia tAgA = @parallelize gA tA threadIdx().x # (THR_M, THR_K, k) tBgB = @parallelize gB tB threadIdx().x # (THR_N, THR_K, k) tAsA = @parallelize sA tA threadIdx().x # (THR_M, THR_K) tBsB = @parallelize sB tB threadIdx().x # (THR_N, THR_K) ``` For the specific syntax, please refer to [`@parallelize`](@ref). After the partition, copying is simply: ```julia copyto!(tAsA, view(tAgA, :, :, k)) copyto!(tBsB, view(tBgB, :, :, k)) ``` After copying, we proceed to the actual matrix-multiply-accumulate (mma) computation. Similarly, we need to define a layout for the thread group for this purpose: ```julia tC = @Layout (16, 16) ``` Then we use it to partition gC: ```julia tCgC = @parallelize gC tC threadIdx().x # (THR_M, THR_N) tCrC = similar(tCgC) ``` To reduce memory access to C, we also create an array `tCrC` stored in registers, which serves as the accumulator in the mma computation. After the computation, the contents are copied back into `tCgC`.
A and B are slightly different because computing an element in C requires an entire row from A and an entire column from B, which is reflected in the following code: ```julia tCsA = @parallelize sA tC threadIdx().x (1, :) # (THR_M, bK) tCsB = @parallelize sB tC threadIdx().x (:, 1) # (THR_N, bK) ``` Congratulations, you have now completed all the partitions, and finally, we can compute the matrix multiplication, just as we would on a CPU: ```julia for k in axes(tCsA, 2) for m in axes(tCsA, 1) for n in axes(tCsB, 1) @inbounds tCrC[m, n] += tCsA[m, k] * tCsB[n, k] end end end ``` You can also call [`gemm!`] to perform the same operation: ```julia gemm!(tCrC, tCsA, tCsB, tCrC) ``` The complete kernel code is as follows: ```julia function matmul_kernel(A, sA_layout, tA, B, sB_layout, tB, C, tC) sA = MoYeSharedArray(eltype(A), sA_layout) # (bM, bK) sB = MoYeSharedArray(eltype(B), sB_layout) # (bN, bK) mA = MoYeArray(A) mB = MoYeArray(B) mC = MoYeArray(C) bM = size(sA_layout, 1) bN = size(sB_layout, 1) bK = size(sB_layout, 2) gA = @tile mA (bM, bK) (blockIdx().x, :) # (bM, bN) gB = @tile mB (bN, bK) (blockIdx().y, :) # (bM, bK, K/bK) gC = @tile mC (bM, bN) (blockIdx().x, blockIdx().y) # (bN, bK, K/bK) # copy partition tAgA = @parallelize gA tA threadIdx().x # (THR_M, THR_K, k) tBgB = @parallelize gB tB threadIdx().x # (THR_M, THR_K) tAsA = @parallelize sA tA threadIdx().x # (THR_N, THR_K, k) tBsB = @parallelize sB tB threadIdx().x # (THR_N, THR_K) # mma partition tCsA = @parallelize sA tC threadIdx().x (1, :) # (THR_M, bK) tCsB = @parallelize sB tC threadIdx().x (:, 1) # (THR_N, bK) tCgC = @parallelize gC tC threadIdx().x # (THR_M, THR_N) # accumulator tCrC = similar(tCgC) # (THR_M, THR_N) zeros!(tCrC) for k in axes(tAgA, 3) copyto!(tAsA, view(tAgA, :, :, k)) copyto!(tBsB, view(tBgB, :, :, k)) cp_async_wait() sync_threads() @gc_preserve gemm!(tCrC, tCsA, tCsB, tCrC) sync_threads() end copyto!(tCgC, tCrC) return nothing end ``` We still missed a few points, such as: 1. How to design `sA_layout` and `sB_layout`? For shared memory, we no longer need to consider column-major or row-major but simply need to **avoid bank conflicts**. This can be simply achieved by padding one column. ```julia sA_layout = make_layout((bM, bK), (_1, bM + _1)) sB_layout = make_layout((bN, bK), (_1, bN + _1)) ``` 2. How to design `tC`? The design of `tC` is quite flexible; it only needs to satisfy that the shape of `tC` evenly divides `(bM, bN)`. 3. How to design `tA` and `tB`? You generally want every 32 threads to access contiguous elements in A and B, so the specific design depends on the memory layout of A and B. This technique is known as **memory coalescing**. The `matmul` function looks like this: ```julia function matmul(A, B, C) bM = _128 bN = _128 bK = _8 sA_layout = make_layout((bM, bK), (_1, bM + _1)) sB_layout = make_layout((bN, bK), (_1, bN + _1)) tA = @Layout (32, 8) tB = @Layout (32, 8) tC = @Layout (16, 16) threads = Int(size(tC)) blocks = (cld(size(A, 1), bM), cld(size(B, 1), bN)) @cuda threads=threads blocks=blocks matmul_kernel(A, sA_layout, tA, B, sB_layout, tB, C, tC) end function test() A = CUDA.randn(Float32, 2048, 256) B = CUDA.randn(Float32, 2048, 256) C = CUDA.randn(Float32, 2048, 2048) matmul(A, B, C) CUDA.synchronize() @test C == A * B' CUDA.unsafe_free!(A) CUDA.unsafe_free!(B) CUDA.unsafe_free!(C) end test() ``` This concludes the guide to implementing matrix multiplication with MoYe.jl, focusing on efficient memory management and
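To make the divisibility requirement on `tC` (point 2 in the list above) concrete, here is a quick plain-Julia check of the work each thread ends up with under the configuration used in `matmul`; the numbers below are simple arithmetic, not MoYe output:

```julia
bM, bN, bK = 128, 128, 8
tC = (16, 16)                                 # mma thread layout
@assert all(rem.((bM, bN), tC) .== 0)         # tC must evenly divide (bM, bN)
values_per_thread = (bM ÷ tC[1], bN ÷ tC[2])  # (8, 8) elements of C accumulated per thread
threads_per_block = prod(tC)                  # 256, consistent with the 32×8 copy layouts tA and tB
```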
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
docs
7420
## Overlap global-to-shared copies with mma compute We can overlap global-to-shared memory copies with mma compute. ![](https://developer-blogs.nvidia.com/wp-content/uploads/2020/09/sequence-asynchronous-copy-batches-1.png) To do this we will explicitly load data from shared memory to registers for the mma computation and submit a new load from global memory to shared memory for the next tile before compute. ```julia function matmul_kernel(A, sA_layout, copy_A, B, sB_layout, copy_B, C, mma_C) sA = MoYeSharedArray(eltype(A), sA_layout) sB = MoYeSharedArray(eltype(B), sB_layout) mA = MoYeArray(A) mB = MoYeArray(B) mC = MoYeArray(C) bM = size(sA_layout, 1) bN = size(sB_layout, 1) bK = size(sB_layout, 2) gA = @tile mA (bM, bK) (blockIdx().x, :) gB = @tile mB (bN, bK) (blockIdx().y, :) gC = @tile mC (bM, bN) (blockIdx().x, blockIdx().y) # copy partition thr_copy_a = get_slice(copy_A, threadIdx().x) tAgA = partition_S(thr_copy_a, gA) # (CPY, CPY_M, CPY_K, k) tAsA = partition_D(thr_copy_a, sA) # (CPY, CPY_M, CPY_K) thr_copy_b = get_slice(copy_B, threadIdx().x) tBgB = partition_S(thr_copy_b, gB) # (CPY, CPY_N, CPY_K, k) tBsB = partition_D(thr_copy_b, sB) # (CPY, CPY_N, CPY_K) # Copy gmem to smem for k_tile=1 copyto!(copy_A, tAsA, view(tAgA, :, :, :, _1)) copyto!(copy_B, tBsB, view(tBgB, :, :, :, _1)) # mma partition thr_mma = get_slice(mma_C, threadIdx().x) tCsA = partition_A(thr_mma, sA) # (MMA, MMA_M, MMA_K) tCsB = partition_B(thr_mma, sB) # (MMA, MMA_M, MMA_K) tCgC = partition_C(thr_mma, gC) # (MMA, MMA_M, MMA_N) # mma registers tCrA = make_fragment_A(thr_mma, tCsA) # (MMA, MMA_M, MMA_K) tCrB = make_fragment_B(thr_mma, tCsB) # (MMA, MMA_N, MMA_K) tCrC = make_fragment_C(thr_mma, tCgC) # (MMA, MMA_M, MMA_N) zeros!(tCrC) k_max = size(tAgA, 4) for k in 1:k_max cp_async_wait() sync_threads() # copy from smem to rmem copyto!(tCrA, tCsA) copyto!(tCrB, tCsB) sync_threads() if k < k_max copyto!(copy_A, tAsA, view(tAgA, :, :, :, k+1)) copyto!(copy_B, tBsB, view(tBgB, :, :, :, k+1)) end @gc_preserve gemm!(mma_C, tCrC, tCrA, tCrB, tCrC) end copyto!(tCgC, tCrC) return nothing end ``` ## Double buffer We can also overlap shared-to-registers memory copies with mma compute. To do this we will need to allocate two shared memory buffers, one for the current compute and one for the next tile. We prefetch the next tile from global memory to shared memory asynchronously. 
![matmuil](../assets/pipeline.svg) ```julia @views function matmul_kernel(A, sA_layout, copy_A, B, sB_layout, copy_B, C, mma_C) sA = MoYeSharedArray(eltype(A), sA_layout) # (bM, bK, 2) sB = MoYeSharedArray(eltype(B), sB_layout) # (bN, bK, 2) mA = MoYeArray(A) mB = MoYeArray(B) mC = MoYeArray(C) bM = size(sA_layout, 1) bN = size(sB_layout, 1) bK = size(sB_layout, 2) gA = @tile mA (bM, bK) (blockIdx().x, :) gB = @tile mB (bN, bK) (blockIdx().y, :) gC = @tile mC (bM, bN) (blockIdx().x, blockIdx().y) # copy partition thr_copy_a = get_slice(copy_A, threadIdx().x) tAgA = partition_S(thr_copy_a, gA) # (CPY, CPY_M, CPY_K, k) tAsA = partition_D(thr_copy_a, sA) # (CPY, CPY_M, CPY_K, 2) thr_copy_b = get_slice(copy_B, threadIdx().x) tBgB = partition_S(thr_copy_b, gB) # (CPY, CPY_N, CPY_K, k) tBsB = partition_D(thr_copy_b, sB) # (CPY, CPY_N, CPY_K, 2) # Copy gmem to smem for k_tile=1 copyto!(copy_A, tAsA[:, :, :, 1], tAgA[:, :, :, _1]) copyto!(copy_B, tBsB[:, :, :, 1], tBgB[:, :, :, _1]) # mma partition thr_mma = get_slice(mma_C, threadIdx().x) tCsA = partition_A(thr_mma, sA) # (MMA, MMA_M, MMA_K, 2) tCsB = partition_B(thr_mma, sB) # (MMA, MMA_M, MMA_K, 2) tCgC = partition_C(thr_mma, gC) # (MMA, MMA_M, MMA_N) # mma registers tCrA = make_fragment_A(thr_mma, tCsA[:, :, :, _1]) # (MMA, MMA_M, MMA_K) tCrB = make_fragment_B(thr_mma, tCsB[:, :, :, _1]) # (MMA, MMA_N, MMA_K) tCrC = make_fragment_C(thr_mma, tCgC) # (MMA, MMA_M, MMA_N) zeros!(tCrC) cp_async_wait() sync_threads() # Copy smem to rmem for k_block=1 smem_read = 1 smem_write = 2 tCsA_p = view(tCsA, :, :, :, smem_read) tCsB_p = view(tCsB, :, :, :, smem_read) copyto!(tCrA[:, :, 1], tCsA_p[:, :, _1]) copyto!(tCrB[:, :, 1], tCsB_p[:, :, _1]) k_tile_max = size(tAgA, 4) k_block_max = static_size(tCrA, 3) for k_tile in 1:k_tile_max @loopinfo unroll for k_block in _1:k_block_max k_block_next = k_block + 1 if k_block == k_block_max cp_async_wait() sync_threads() tCsA_p = view(tCsA, :, :, :, smem_read) tCsB_p = view(tCsB, :, :, :, smem_read) k_block_next = 1 end copyto!(tCrA[:, :, k_block_next], tCsA_p[:, :, k_block_next]) copyto!(tCrB[:, :, k_block_next], tCsB_p[:, :, k_block_next]) if k_block == _1 && k_tile<k_tile_max copyto!(copy_A, tAsA[:, :, :, smem_write], tAgA[:, :, :, k_tile+1]) copyto!(copy_B, tBsB[:, :, :, smem_write], tBgB[:, :, :, k_tile+1]) smem_read, smem_write = smem_write, smem_read end @gc_preserve gemm!(mma_C, tCrC, tCrA[:, :, k_block], tCrB[:, :, k_block], tCrC) end end copyto!(tCgC, tCrC) return nothing end function matmul(A, B, C) bM = _128 bN = _128 bK = _8 sA_layout = make_layout((bM, bK, _2), (_1, bM + _2, (bM + _2) * bK)) sB_layout = make_layout((bN, bK, _2), (_1, bN + _2, (bN + _2) * bK)) TA = eltype(A) TB = eltype(B) TC = eltype(C) copy_A = make_tiled_copy(CopyAtom{CPOP_ASYNC_CACHEALWAYS{Float64}, TA}(), @Layout((32, 8)), @Layout((2, 1))) copy_B = make_tiled_copy(CopyAtom{CPOP_ASYNC_CACHEALWAYS{Float64}, TB}(), @Layout((32, 8)), @Layout((2, 1))) mma_C = make_tiled_mma(UniversalFMA{TA,TB, TC}(), # MMA operation @Layout((32, 8))) # Atom layout threads = Int(size(mma_C)) blocks = (cld(size(A, 1), bM), cld(size(B, 1), bN)) @cuda threads=threads blocks=blocks matmul_kernel(A, sA_layout, copy_A, B, sB_layout, copy_B, C, mma_C) end function test() A = CUDA.randn(Float32, 2048, 256) B = CUDA.randn(Float32, 2048, 256) C = CUDA.randn(Float32, 2048, 2048) matmul(A, B, C) CUDA.synchronize() @test C == A * B' CUDA.unsafe_free!(A) CUDA.unsafe_free!(B) CUDA.unsafe_free!(C) end test() ```
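Stripped of all data movement, the double-buffer bookkeeping reduces to a ping-pong between the two shared-memory stages. The sketch below (plain Julia, illustration only) shows which stage is read and which is written for each `k_tile`:

```julia
k_tile_max = 4
smem_read, smem_write = 1, 2
for k_tile in 1:k_tile_max
    # compute consumes stage `smem_read`; the prefetch of the next tile targets `smem_write`
    println((k_tile = k_tile, read = smem_read, write = smem_write))
    if k_tile < k_tile_max            # the last tile has nothing left to prefetch
        smem_read, smem_write = smem_write, smem_read
    end
end
```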
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
docs
17208
# Tensor Cores Tensor cores are specialized hardware accelerators designed to optimize matrix operations, which are crucial for deep learning and artificial intelligence algorithms. Enabling tensor cores can be as straightforward as modifying a single line of code in the existing `matmul_kernel` function: ```julia mma = make_tiled_mma(MMAOP_8x8x4_F32F16F16F32_NT(), atom_layout, tiler) ``` !!! note The NT in MMAOP_8x8x4_F32F16F16F32_NT indicates that A is in M-major order and B is in N-major order. Let's explore a minimal example ```julia mma = make_tiled_mma(MMAOP_16x8x8_F32TF32TF32F32_TN()) print_typst(mma) ``` ![](../assets/TF32F32.svg) At first glance, the diagram may seem complex, but the concept is straightforward: the threads collective load data from matrices `A` and `B` according to the specified layout. During the matrix multiply-accumulate (MMA) computation, data is internally shared among threads—a process that is not transparent to the user. Once the computation is complete, each thread stores the results as dictated by the layout of matrix `C` shown in the illustration. ```julia function matmul_kernel(A, sA_layout, copy_A, B, sB_layout, copy_B, C, mma) sA = MoYeSharedArray(eltype(A), sA_layout) sB = MoYeSharedArray(eltype(B), sB_layout) mA = MoYeArray(A) mB = MoYeArray(B) mC = MoYeArray(C) bM = size(sA_layout, 1) bN = size(sB_layout, 1) bK = size(sB_layout, 2) gA = @tile mA (bM, bK) (blockIdx().x, :) gB = @tile mB (bN, bK) (blockIdx().y, :) gC = @tile mC (bM, bN) (blockIdx().x, blockIdx().y) # copy partition thr_copy_a = get_slice(copy_A, threadIdx().x) tAgA = partition_S(thr_copy_a, gA) # (CPY, CPY_M, CPY_K, k) tAsA = partition_D(thr_copy_a, sA) # (CPY, CPY_M, CPY_K) thr_copy_b = get_slice(copy_B, threadIdx().x) tBgB = partition_S(thr_copy_b, gB) # (CPY, CPY_N, CPY_K, k) tBsB = partition_D(thr_copy_b, sB) # (CPY, CPY_N, CPY_K) # mma partition thr_mma = get_slice(mma, threadIdx().x) tCsA = partition_A(thr_mma, sA) # (MMA, MMA_M, MMA_K) tCsB = partition_B(thr_mma, sB) # (MMA, MMA_M, MMA_K) tCgC = partition_C(thr_mma, gC) # (MMA, MMA_M, MMA_N) tCrA = make_fragment_A(thr_mma, tCsA) # (MMA, MMA_M, MMA_K) tCrB = make_fragment_B(thr_mma, tCsB) tCrC = make_fragment_C(thr_mma, tCgC) zeros!(tCrC) # copy from global to shared copyto!(copy_A, tAsA, view(tAgA, :, :, :, _1)) copyto!(copy_B, tBsB, view(tBgB, :, :, :, _1)) cp_async_wait() # copy from shared to registers copyto!(tCrA, tCsA) copyto!(tCrB, tCsB) @gc_preserve gemm!(mma, tCrC, tCrA, tCrB, tCrC) copyto!(tCgC, tCrC) @inbounds tCrC[1] # compiler bug, have to load after copyto! 
return nothing end function matmul(A, B, C) bM = _16 bN = _8 bK = _8 sA_layout = make_layout((bM, bK), (_1, bM)) sB_layout = make_layout((bN, bK), (bK, _1)) TA = eltype(A) TB = eltype(B) TC = eltype(C) copy_A = make_tiled_copy(CopyAtom{CPOP_ASYNC_CACHEALWAYS{UInt128}, TA}(), @Layout((4, 8)), @Layout((4, 1))) copy_B = make_tiled_copy(CopyAtom{CPOP_ASYNC_CACHEALWAYS{UInt64}, TB}(), @Layout((8, 4), (4, 1)), @Layout((1, 2))) mma = make_tiled_mma(MMAOP_16x8x8_F32TF32TF32F32_TN()) threads = Int(size(mma)) blocks = (cld(size(A, 1), bM), cld(size(B, 1), bN)) @cuda threads=threads blocks=blocks matmul_kernel(A, sA_layout, copy_A, B, sB_layout, copy_B, C, mma) end function test() A = CuArray(reshape(collect(1:16*8) .* 1f0, (16,8))) B = CuArray(reshape(collect(1:8*8) .* 1f0, (8,8))) C = CuArray(ones(Float32, (16,8))) matmul(A, B', C) CUDA.synchronize() @test C == A * B CUDA.unsafe_free!(A) CUDA.unsafe_free!(B) CUDA.unsafe_free!(C) end ``` ## LDMatrix The `ldmatrix` instruction at the warp level facilitates the loading of data from shared memory into registers and suffles them to align with a tensor core MMA operation. Given a tensor core MMA operation, the shuffling can be "inverted" to obtain a `TiledCopy` count for the shuffling. ```julia mma = make_tiled_mma(MMAOP_16x8x8_F32TF32TF32F32_TN()) smem_copy_A = make_tiled_copy_A(CopyAtom{LDSM_U32x4_N, Float32}(), mma) print_typst(smem_copy_A) ``` ![](../assets/smem_copy_A.svg) The resulting layout on the right hand side matches the layout of A in the `mma`. !!! note The `TN` in `MMAOP_16x8x8_F32TF32TF32F32_TN` means that both A and B are in `K`-major order. The `N` in `LDSM_U32x4_N` means the source array is `K`-major order. !!! note The `ldmatrix` requires four consecutive threads to load 16 consecutive bytes, demanding that the layout of `A` in shared memory meet this specification. For B: ```julia smem_copy_B = make_tiled_copy_B(CopyAtom{LDSM_U32x2_N, Float32}(), mma) print_typst(smem_copy_B) ``` ![](../assets/smem_copy_B.svg) !!! Note The visualization of `B` in `mma` is draw as `(K, N)` but `(N, K)` in `smem_copy_B`. 
We then use `smem_copy_A` and `smem_copy_B` to re-tile the shared memory and registers ```julia smem_thr_copy_A = get_slice(smem_copy_A, threadIdx().x) smem_thr_copy_B = get_slice(smem_copy_B, threadIdx().x) tCsA_retiled = partition_S(smem_thr_copy_A, sA) tCsB_retiled = partition_S(smem_thr_copy_B, sB) tCrA_retiled = retile_D(smem_thr_copy_A, tCrA) tCrB_retiled = retile_D(smem_thr_copy_B, tCrB) ``` Complete code: ```julia function matmul_kernel(A, sA_layout, gmem_copy_A, smem_copy_A, B, sB_layout, gmem_copy_B, smem_copy_B, C, mma) sA = MoYeSharedArray(eltype(A), sA_layout) sB = MoYeSharedArray(eltype(B), sB_layout) mA = MoYeArray(A) mB = MoYeArray(B) mC = MoYeArray(C) bM = size(sA_layout, 1) bN = size(sB_layout, 1) bK = size(sB_layout, 2) gA = @tile mA (bM, bK) (blockIdx().x, :) gB = @tile mB (bN, bK) (blockIdx().y, :) gC = @tile mC (bM, bN) (blockIdx().x, blockIdx().y) # gmem copy partition gmem_thr_copy_a = get_slice(gmem_copy_A, threadIdx().x) tAgA = partition_S(gmem_thr_copy_a, gA) # (CPY, CPY_M, CPY_K, k) tAsA = partition_D(gmem_thr_copy_a, sA) # (CPY, CPY_M, CPY_K) gmem_thr_copy_b = get_slice(gmem_copy_B, threadIdx().x) tBgB = partition_S(gmem_thr_copy_b, gB) # (CPY, CPY_N, CPY_K, k) tBsB = partition_D(gmem_thr_copy_b, sB) # (CPY, CPY_N, CPY_K) # copy from global to shared copyto!(gmem_copy_A, tAsA, view(tAgA, :, :, :, _1)) copyto!(gmem_copy_B, tBsB, view(tBgB, :, :, :, _1)) # mma partition thr_mma = get_slice(mma, threadIdx().x) tCsA = partition_A(thr_mma, sA) # (MMA, MMA_M, MMA_K) tCsB = partition_B(thr_mma, sB) # (MMA, MMA_M, MMA_K) tCgC = partition_C(thr_mma, gC) # (MMA, MMA_M, MMA_N) tCrA = make_fragment_A(thr_mma, tCsA) # (MMA, MMA_M, MMA_K) tCrB = make_fragment_B(thr_mma, tCsB) # (MMA, MMA_N, MMA_K) tCrC = make_fragment_C(thr_mma, tCgC) # (MMA, MMA_M, MMA_N) zeros!(tCrC) # retile smem_thr_copy_A = get_slice(smem_copy_A, threadIdx().x) smem_thr_copy_B = get_slice(smem_copy_B, threadIdx().x) tCsA_retiled = partition_S(smem_thr_copy_A, sA) tCsB_retiled = partition_S(smem_thr_copy_B, sB) tCrA_retiled = retile_D(smem_thr_copy_A, tCrA) tCrB_retiled = retile_D(smem_thr_copy_B, tCrB) cp_async_wait() # copy from shared to registers copyto!(smem_copy_A, tCrA_retiled, tCsA_retiled) copyto!(smem_copy_B, tCrB_retiled, tCsB_retiled) @gc_preserve gemm!(mma, tCrC, tCrA, tCrB, tCrC) copyto!(tCgC, tCrC) @inbounds tCrC[1] # compiler bug, have to load after copyto! return nothing end function matmul(A, B, C) bM = _16 bN = _8 bK = _8 sA_layout = make_layout((bM, bK), (_1, bM)) sB_layout = make_layout((bN, bK), (bK, _1)) TA = eltype(A) TB = eltype(B) TC = eltype(C) gmem_copy_A = make_tiled_copy(CopyAtom{CPOP_ASYNC_CACHEALWAYS{UInt128}, TA}(), @Layout((4, 8)), @Layout((4, 1))) gmem_copy_B = make_tiled_copy(CopyAtom{CPOP_ASYNC_CACHEALWAYS{UInt64}, TB}(), @Layout((8, 4), (4, 1)), @Layout((1, 2))) mma = make_tiled_mma(MMAOP_16x8x8_F32TF32TF32F32_TN()) # Note: A is M-major so we can only use `UniversalCopy` smem_copy_A = make_tiled_copy_A(CopyAtom{UniversalCopy{TA}, TA}(), mma) smem_copy_B = make_tiled_copy_B(CopyAtom{LDSM_U32x2_N, TB}(), mma) threads = Int(size(mma)) blocks = (cld(size(A, 1), bM), cld(size(B, 1), bN)) @cuda threads=threads blocks=blocks matmul_kernel(A, sA_layout, gmem_copy_A, smem_copy_A, B, sB_layout, gmem_copy_B, smem_copy_B, C, mma) end ``` ## Tiled LdMatrix So far we have only talked about how to use a single `ldmatrix` instruction. We can use `tile_to_shape` to tile it to a larger shape. 
```julia sB_atom_layout = make_layout((_8, _8), (_8, _1)) sB_layout = tile_to_shape(sB_atom_layout, static((24, 16))) print_layout(sB_atom_layout) print_layout(sB_layout) ``` Note how the internal layout of `sB_atom_layout` is preserved in `sB_layout`. Updated code: ```julia function matmul(A, B, C) bM = _16 bN = _8 bK = _16 sA_atom_layout = @Layout (16, 8) (1, 16) sB_atom_layout = @Layout (8, 8) (8, 1) sA_layout = MoYe.tile_to_shape(sA_atom_layout, (bM, bK)) sB_layout = MoYe.tile_to_shape(sB_atom_layout, (bN, bK)) TA = eltype(A) TB = eltype(B) TC = eltype(C) gmem_copy_A = make_tiled_copy(CopyAtom{CPOP_ASYNC_CACHEALWAYS{UInt128}, TA}(), @Layout((4, 8)), @Layout((4, 1))) gmem_copy_B = make_tiled_copy(CopyAtom{CPOP_ASYNC_CACHEALWAYS{UInt128}, TB}(), @Layout((8, 4), (4, 1)), @Layout((1, 4))) mma = make_tiled_mma(MMAOP_16x8x8_F32TF32TF32F32_TN()) # Note: A is M-major so we can only use `UniversalCopy` smem_copy_A = make_tiled_copy_A(CopyAtom{UniversalCopy{TA}, TA}(), mma) smem_copy_B = make_tiled_copy_B(CopyAtom{LDSM_U32x2_N, TB}(), mma) threads = Int(size(mma)) blocks = (cld(size(A, 1), bM), cld(size(B, 1), bN)) @cuda threads=threads blocks=blocks matmul_kernel(A, sA_layout, gmem_copy_A, smem_copy_A, B, sB_layout, gmem_copy_B, smem_copy_B, C, mma) end function test() A = CUDA.randn(Float32, 16, 16) # M-major B = CUDA.randn(Float32, 16, 8) # K-major C = CUDA.randn(Float32, 16, 8) matmul(A, B', C) CUDA.synchronize() @test C == A * B CUDA.unsafe_free!(A) CUDA.unsafe_free!(B) CUDA.unsafe_free!(C) end test() ``` ## Double buffering ```julia @views function matmul_kernel(A, sA_layout, gmem_copy_A, smem_copy_A, B, sB_layout, gmem_copy_B, smem_copy_B, C, mma) sA = MoYeSharedArray(eltype(A), sA_layout) # (bM, bK, 2) sB = MoYeSharedArray(eltype(B), sB_layout) # (bN, bK, 2) mA = MoYeArray(A) mB = MoYeArray(B) mC = MoYeArray(C) bM = size(sA_layout, 1) bN = size(sB_layout, 1) bK = size(sB_layout, 2) gA = @tile mA (bM, bK) (blockIdx().x, :) gB = @tile mB (bN, bK) (blockIdx().y, :) gC = @tile mC (bM, bN) (blockIdx().x, blockIdx().y) # gmem copy partition gmem_thr_copy_A = get_slice(gmem_copy_A, threadIdx().x) tAgA = partition_S(gmem_thr_copy_A, gA) # (CPY, CPY_M, CPY_K, k) tAsA = partition_D(gmem_thr_copy_A, sA) # (CPY, CPY_M, CPY_K, 2) gmem_thr_copy_B = get_slice(gmem_copy_B, threadIdx().x) tBgB = partition_S(gmem_thr_copy_B, gB) # (CPY, CPY_N, CPY_K, k) tBsB = partition_D(gmem_thr_copy_B, sB) # (CPY, CPY_N, CPY_K, 2) # Copy gmem to smem for k_tile=1 copyto!(gmem_copy_A, tAsA[:, :, :, _1], tAgA[:, :, :, _1]) copyto!(gmem_copy_B, tBsB[:, :, :, _1], tBgB[:, :, :, _1]) # mma partition thr_mma = get_slice(mma, threadIdx().x) tCsA = partition_A(thr_mma, sA) # (MMA, MMA_M, MMA_K, 2) tCsB = partition_B(thr_mma, sB) # (MMA, MMA_M, MMA_K, 2) tCgC = partition_C(thr_mma, gC) # (MMA, MMA_M, MMA_N) tCrA = make_fragment_A(thr_mma, tCsA[:, :, :, _1]) # (MMA, MMA_M, MMA_K) tCrB = make_fragment_B(thr_mma, tCsB[:, :, :, _1]) # (MMA, MMA_N, MMA_K) tCrC = make_fragment_C(thr_mma, tCgC) # (MMA, MMA_M, MMA_N) zeros!(tCrC) # retile smem_thr_copy_A = get_slice(smem_copy_A, threadIdx().x) smem_thr_copy_B = get_slice(smem_copy_B, threadIdx().x) tCsA_retiled = partition_S(smem_thr_copy_A, sA) # (MMA, MMA_M, MMA_K, 2) tCsB_retiled = partition_S(smem_thr_copy_B, sB) # (MMA, MMA_N, MMA_K, 2) tCrA_retiled = retile_D(smem_thr_copy_A, tCrA) # (MMA, MMA_M, MMA_K) tCrB_retiled = retile_D(smem_thr_copy_B, tCrB) # (MMA, MMA_N, MMA_K) cp_async_wait() sync_threads() # Copy smem to rmem for k_block=1 smem_read = 1 smem_write = 2 tCsA_p = 
view(tCsA_retiled, :, :, :, smem_read) tCsB_p = view(tCsB_retiled, :, :, :, smem_read) copyto!(smem_copy_A, tCrA_retiled[:, :, _1], tCsA_p[:, :, _1]) copyto!(smem_copy_B, tCrB_retiled[:, :, _1], tCsB_p[:, :, _1]) k_tile_max = size(tAgA, 4) k_block_max = static_size(tCrA, 3) for k_tile in 1:k_tile_max @loopinfo unroll for k_block in _1:k_block_max k_block_next = k_block + 1 if k_block == k_block_max cp_async_wait() sync_threads() tCsA_p = view(tCsA_retiled, :, :, :, smem_read) tCsB_p = view(tCsB_retiled, :, :, :, smem_read) k_block_next = 1 end copyto!(smem_copy_A, tCrA_retiled[:, :, k_block_next], tCsA_p[:, :, k_block_next]) copyto!(smem_copy_B, tCrB_retiled[:, :, k_block_next], tCsB_p[:, :, k_block_next]) if k_block == _1 && k_tile<k_tile_max copyto!(gmem_copy_A, tAsA[:, :, :, smem_write], tAgA[:, :, :, k_tile+1]) copyto!(gmem_copy_B, tBsB[:, :, :, smem_write], tBgB[:, :, :, k_tile+1]) smem_read, smem_write = smem_write, smem_read end @gc_preserve gemm!(mma, tCrC, tCrA[:, :, k_block], tCrB[:, :, k_block], tCrC) end end copyto!(tCgC, tCrC) sync_threads() return nothing end function matmul(A, B, C) bM = _128 bN = _128 bK = _16 TA = eltype(A) TB = eltype(B) TC = eltype(C) mma = make_tiled_mma(MMAOP_16x8x8_F32TF32TF32F32_TN(), @Layout((2,2,1), (2,1,1)), (_32,_32,_8)) gmem_copy_A = make_tiled_copy(CopyAtom{CPOP_ASYNC_CACHEALWAYS{UInt128}, TA}(), @Layout((16, 8)), @Layout((4, 1))) gmem_copy_B = make_tiled_copy(CopyAtom{CPOP_ASYNC_CACHEALWAYS{UInt128}, TB}(), @Layout((32, 4), (4, 1)), @Layout((1, 4))) # A is M-major so we cannot use LDSM_U32x4_N smem_copy_A = make_tiled_copy_A(CopyAtom{UniversalCopy{TA}, TA}(), mma) smem_copy_B = make_tiled_copy_B(CopyAtom{LDSM_U32x4_N, TB}(), mma) sA_atom_layout = @Layout (32, 8) (1, 32) sB_atom_layout = @Layout (8, 16) (16, 1) sA_layout = tile_to_shape(sA_atom_layout, (bM, bK, _2)) sB_layout = tile_to_shape(sB_atom_layout, (bN, bK, _2)) threads = Int(size(mma)) blocks = (cld(size(A, 1), bM), cld(size(B, 1), bN)) @cuda threads=threads blocks=blocks matmul_kernel(A, sA_layout, gmem_copy_A, smem_copy_A, B, sB_layout, gmem_copy_B, smem_copy_B, C, mma) end function test() A = CUDA.randn(Float32, 2048, 256) # M-major B = CUDA.randn(Float32, 256, 2048) # K-major C = CUDA.randn(Float32, 2048, 2048) matmul(A, B', C) CUDA.synchronize() @test C == A * B CUDA.unsafe_free!(A) CUDA.unsafe_free!(B) CUDA.unsafe_free!(C) end test() ```
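As a rough sanity check of the launch geometry in the double-buffered version (assuming, as is usual for these tensor-core MMA ops, that one `MMAOP_16x8x8_*` atom is executed cooperatively by a 32-thread warp):

```julia
warps   = prod((2, 2, 1))                 # atom layout passed to make_tiled_mma
threads = 32 * warps                      # 128 threads per block, i.e. Int(size(mma))
bM, bN  = 128, 128
blocks  = (cld(2048, bM), cld(2048, bN))  # (16, 16) blocks for the 2048×2048 C in test()
```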
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
docs
8836
# Tiled Copy We have already introduced how to copy data using `@tile` and `@parallelize`. This process might still appear somewhat cumbersome, and `TiledCopy` serves to simplify it. Consider the following example where we employ six threads to transfer an array src of shape (4, 9) into another array dst with the identical shape. The relationship mapping logical coordinates to thread IDs can be visualized as: ```julia 1 1 1 2 2 2 3 3 3 1 1 1 2 2 2 3 3 3 4 4 4 5 5 5 6 6 6 4 4 4 5 5 5 6 6 6 ``` Here, each thread is assigned a data segment defined by the layout (2,3):(1,2). The group of threads operates within a layout of (2,3):(3,1), referred to as `val_layout` and `thr_layout`, respectively. To begin, we initialize these arrays: ```@repl tiled_copy using MoYe src_buffer = collect(1:36) .* 0.1; src = MoYeArray(src_buffer, @Layout((4,9))) dst_buffer = zeros(36); dst = MoYeArray(dst_buffer, make_layout((_4,_9))); ``` We then proceed to set up a `TiledCopy`: ```@repl tiled_copy thr_layout = @Layout (2, 3) (3, 1) val_layout = @Layout (2, 3) (1, 2) tiled_copy = make_tiled_copy( CopyAtom{UniversalCopy{Float64}, Float64}(), thr_layout, val_layout) ``` The second parameter `Float64` in `CopyAtom` indicates that the copied data is of `Float64` type. `UniversalCopy{Float64}` sets the type to which the data is recast for each copy operation; since it matches the element type here, the copy is not vectorized. Here is a vectorized `TiledCopy`: ```julia tiled_copy_vec = make_tiled_copy( CopyAtom{UniversalCopy{UInt128}, Float64}(), thr_layout, val_layout) ``` Note that a vectorized copy must be compatible with `val_layout`, i.e., `val_layout` needs to contain enough elements, in a number divisible by the vector width, for the copy to be vectorized. You can visualize this `tiled_copy` by using `print_typst(tiled_copy)`. Visit [typst](https://typst.app), copy the printed string, and you will see the following image: ![tiled_copy](../assets/tiled_copy.svg) The two tables respectively represent the thread distribution of src and dst, which are the same here. There are also some PTX instructions involved in redistributing each thread's data, for example: ```julia print_typst(make_tiled_copy(MoYe.CopyAtom{LDSM_U32x4_N, UInt16}(), @Layout((16,2)), @Layout((2,4)))); ``` ![ldmatrix](../assets/ldmatrix.svg) As you can see, both `thr_layout` and `val_layout` are actually defined on dst. We will go back to `ldmatrix` when we talk about tensor cores. Returning to our example, after making the `tiled_copy`, we can use it to partition data. ```@repl tiled_copy thr_idx = 2; thr_copy = get_slice(tiled_copy, thr_idx); dst_t = partition_D(thr_copy, dst); dst_t.layout src_t = partition_S(thr_copy, src); src_t.layout copyto!(tiled_copy, dst_t, src_t); dst ``` You can see that the second thread has completed the copy. The shape of `dst_t` is `(CPY, CPY_M, CPY_K)`, representing the number of values handled by a thread in a single tile, and the dimensions tiled in `dst`'s shape. Notably, the leftmost mode, `CPY`, stands for the number of vectorized values. In this case it is 1, but try changing to `UniversalCopy{UInt128}` and see how the result changes. The NVIDIA Ampere architecture supports `cuda::memcpy_async` for asynchronously copying data between GPU global memory and shared memory without needing threads to orchestrate the data movement.
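Before moving on, here is a small plain-Julia check (not MoYe API) that reproduces the 4×9 thread-assignment table shown at the beginning of this section: thread IDs follow the (2,3):(3,1) `thr_layout`, and each thread owns one 2×3 patch of the (4, 9) array.

```julia
thr_id(tr, tc) = 1 + (tr - 1) * 3 + (tc - 1) * 1  # thr_layout (2,3):(3,1)
owner(i, j) = thr_id(cld(i, 2), cld(j, 3))        # one 2×3 value patch per thread
[owner(i, j) for i in 1:4, j in 1:9]              # reproduces the table above
```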
In previous architectures, copying from global memory to shared memory usually involved registers for intermediation, corresponding to this syntax: ```@repl tiled_copy thr_idx = 3; thr_copy = get_slice(tiled_copy, thr_idx); dst_t = partition_D(thr_copy, dst); src_t = partition_S(thr_copy, src); dst_r = make_fragment_like(dst_t); copyto!(tiled_copy, dst_r, src_t); copyto!(tiled_copy, dst_t, dst_r); dst ``` ## TiledMMA In this section, we'll show you how to use `TiledMMA` to replace an mma partition. First, invoke the function make_tiled_mma as follows: ```@repl tiled_copy mma_C = make_tiled_mma(UniversalFMA{TA,TB, TC}(), # MMA operation @Layout((16,16))) # Atom layout ``` You can experiment with replacing `UniversalFMA` with another `MMAOp` and use print_typst to view the results. Here are the predefined MMAOps: ```@repl tiled_copy MoYe.mma_ops_list ``` ```julia thr_mma = get_slice(mma_C, threadIdx().x); tCsA = partition_A(sA); tCsB = partition_B(sB); tCgC = partition_C(gC); tCrC = make_fragment_like(tCgC) ``` These instructions operate on tensor cores, a topic we haven't covered yet (but will soon!). ## MatMul Now, we use `TiledCopy` and `TiledMMA` to upgrade the previous matmul_kernel. ```julia function matmul_kernel(A, sA_layout, copy_A, B, sB_layout, copy_B, C, mma_C) sA = MoYeSharedArray(eltype(A), sA_layout) sB = MoYeSharedArray(eltype(B), sB_layout) mA = MoYeArray(A) mB = MoYeArray(B) mC = MoYeArray(C) bM = size(sA_layout, 1) bN = size(sB_layout, 1) bK = size(sB_layout, 2) gA = @tile mA (bM, bK) (blockIdx().x, :) gB = @tile mB (bN, bK) (blockIdx().y, :) gC = @tile mC (bM, bN) (blockIdx().x, blockIdx().y) # copy partition thr_copy_a = get_slice(copy_A, threadIdx().x) tAgA = partition_S(thr_copy_a, gA) # (CPY, CPY_M, CPY_K, k) tAsA = partition_D(thr_copy_a, sA) # (CPY, CPY_M, CPY_K) tArA = make_fragment_like(tAsA) # (CPY, CPY_M, CPY_K) thr_copy_b = get_slice(copy_B, threadIdx().x) tBgB = partition_S(thr_copy_b, gB) # (CPY, CPY_N, CPY_K, k) tBsB = partition_D(thr_copy_b, sB) # (CPY, CPY_N, CPY_K) tBrB = make_fragment_like(tBsB) # (CPY, CPY_N, CPY_K) # mma partition thr_mma = get_slice(mma_C, threadIdx().x) tCsA = partition_A(thr_mma, sA) # (MMA, MMA_M, MMA_K) tCsB = partition_B(thr_mma, sB) # (MMA, MMA_M, MMA_K) tCgC = partition_C(thr_mma, gC) # (MMA, MMA_M, MMA_N) # overlap copy and compute copyto!(copy_A, tArA, view(tAgA, :, :, :, _1)) copyto!(copy_B, tBrB, view(tBgB, :, :, :, _1)) # accumulator tCrC = make_fragment_C(thr_mma, tCgC) zeros!(tCrC) k_max = size(tAgA, 4) for k in 1:k_max sync_threads() copyto!(tAsA, tArA) copyto!(tBsB, tBrB) sync_threads() # load the next tile k_next = k < k_max ? 
k+1 : k copyto!(copy_A, tArA, view(tAgA, :, :, :, k_next)) copyto!(copy_B, tBrB, view(tBgB, :, :, :, k_next)) @gc_preserve gemm!(mma_C, tCrC, tCsA, tCsB, tCrC) end copyto!(tCgC, tCrC) return nothing end function matmul(A, B, C) bM = _128 bN = _128 bK = _8 sA_layout = make_layout((bM, bK), (_1, bM + _1)) sB_layout = make_layout((bN, bK), (_1, bN + _1)) TA = eltype(A) TB = eltype(B) TC = eltype(C) copy_A = make_tiled_copy(CopyAtom{UniversalCopy{TA}, TA}(), @Layout((32, 8)), @Layout((1, 1))) copy_B = make_tiled_copy(CopyAtom{UniversalCopy{TB}, TB}(), @Layout((32, 8)), @Layout((1, 1))) mma_C = make_tiled_mma(UniversalFMA{TA,TB, TC}(), # MMA operation @Layout((32,8))) # Atom layout threads = Int(size(mma_C)) blocks = (cld(size(A, 1), bM), cld(size(B, 1), bN)) @cuda threads=threads blocks=blocks matmul_kernel(A, sA_layout, copy_A, B, sB_layout, copy_B, C, mma_C) end function test() A = CUDA.randn(Float32, 2048, 256) B = CUDA.randn(Float32, 2048, 256) C = CUDA.randn(Float32, 2048, 2048) matmul(A, B, C) CUDA.synchronize() @test C == A * B' CUDA.unsafe_free!(A) CUDA.unsafe_free!(B) CUDA.unsafe_free!(C) end test() ``` ## Vectorized copy As previously mentioned, you can change to `UniversalCopy{Float64}` or `UniversalCopy{UInt128}` to enable vectorized copies. But we also need to keep in mind that the copies must be **coalesced**. For example, the following one is not coalesced: ```julia copy_A = make_tiled_copy(CopyAtom{UniversalCopy{Float64}, TA}(), @Layout((32, 8)), @Layout((4, 1))) ``` since thread 1 is loading from `[1], [2]` and thread 2 is loading from `[5], [6]`. These are coalesced: ```julia copy_A = make_tiled_copy(CopyAtom{UniversalCopy{Float64}, TA}(), @Layout((32, 8)), @Layout((2, 1))) copy_A = make_tiled_copy(CopyAtom{UniversalCopy{UInt128}, TA}(), @Layout((32, 8)), @Layout((4, 1))) ```
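The coalescing argument can be checked with some simple index arithmetic (plain Julia, not MoYe API): with a column-major (32, 8) thread layout and a `(val_m, 1)` value layout, each thread of the first thread column owns `val_m` consecutive rows of a column.

```julia
rows(thread, val_m) = ((thread - 1) * val_m + 1):(thread * val_m)

rows.(1:2, 4)  # [1:4, 5:8]: as two Float64 loads each, thread 1 reads [1,2] then [3,4]
               # while thread 2 starts at [5,6] -- neighbouring loads are not contiguous
rows.(1:2, 2)  # [1:2, 3:4]: one Float64 load per thread, back to back -> coalesced
               # (likewise [1:4, 5:8] is fine when each range is a single UInt128 load)
```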
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "BSD-3-Clause" ]
4.0.0
bdb497426ba0848924f73995feac59f34c495d28
docs
7198
# Matrix Transpose Tutorial This tutorial illustrates the process copying data between global memory and shared memory using `MoYe`. In this tutorial, we will use the following configuration: - Array size: 2048 x 2048 - Block size: 32 x 32 - Thread size: 32 x 8 ## Copy Kernel We start with a copy kernel. ```julia using MoYe, Test, CUDA function copy_kernel(dest, src, smemlayout, blocklayout, threadlayout) moye_smem = MoYeSharedArray(eltype(dest), smemlayout) moye_dest = MoYeArray(dest) moye_src = MoYeArray(src) bM = size(blocklayout, 1) bN = size(blocklayout, 2) blocktile_dest = @tile moye_dest (bM, bN) (blockIdx().x, blockIdx().y) blocktile_src = @tile moye_src (bM, bN) (blockIdx().x, blockIdx().y) threadtile_dest = @parallelize blocktile_dest threadlayout threadIdx().x threadtile_src = @parallelize blocktile_src threadlayout threadIdx().x threadtile_smem = @parallelize moye_smem threadlayout threadIdx().x for i in eachindex(threadtile_smem) threadtile_smem[i] = threadtile_src[i] end for i in eachindex(threadtile_dest) threadtile_dest[i] = threadtile_smem[i] end return nothing end function test_copy_async(M, N) a = CUDA.rand(Float32, M, N) b = CUDA.rand(Float32, M, N) blocklayout = @Layout (32, 32) # 32 * 32 elements in a block smemlayout = @Layout (32, 32) # 32 * 32 elements in shared memory threadlayout = @Layout (32, 8) # 32 * 8 threads in a block bM = size(blocklayout, 1) bN = size(blocklayout, 2) blocks = (cld(M, bM), cld(N, bN)) threads = Int(size(threadlayout)) @cuda blocks=blocks threads=threads copy_kernel(a, b, smemlayout, blocklayout, threadlayout) CUDA.synchronize() @test a == b end test_copy_async(2048, 2048) ``` ## Code Explanation The device function follows these steps: 1. Allocate shared memory using `MoYeSharedArray` with a static layout. 2. Wrap the destination and source arrays with dynamic layouts. 3. Get the size of each block in the grid (bM and bN). 4. Create local tiles for the destination and source arrays using [`@tile`](@ref). 5. Partition the local tiles into thread tiles using [`@parallelize`](@ref). 6. Copy data from the source thread tile to the shared memory thread tile. 7. Synchronize threads. 8. Copy data back from the shared memory thread tile to the destination thread tile. The host function tests the copy_kernel function with the following steps: 1. Define the dimensions M and N for the source and destination arrays. 2. Create random GPU arrays a and b with the specified dimensions using CUDA.rand. 3. Define the block and thread layouts using [`@Layout`](@ref) for creating **static** layouts. 4. Calculate the number of blocks in the grid using `cld`. Here we assume the divisibility. A few things to notice here: 1. [`@tile`](@ref) means that all of our blocks cover the entire array. 2. Each block contains 32 x 32 elements of the original array, but we have 32 x 8 threads per block, which means that each thread processes 4 elements. The code ```julia @parallelize blocktile_dest threadlayout threadIdx().x ``` returns the set of elements that the thread corresponding to threadIdx().x is processing, which in this case is an array of length 4. 3. Once we have completed all the tiling, we just perform computations as if we were dealing with a regular array: ```julia for i in eachindex(threadtile_smem) threadtile_smem[i] = threadtile_src[i] end ``` You need not concern yourself with index bookkeeping, it is implicitly handled by the layout; instead, concentrate on the computation aspect, as it is a fundamental objective of MoYe.jl. 
Additionally, you can use the [`copyto!`](@ref) function for static `MoYeArray`s, with two key features: copying from global memory to shared memory automatically calls `cp.async` (requires `sm_80` or higher), and automatic vectorization when possible. Here is how it would look using `copyto!`: ```julia function copy_kernel(dest, src, smemlayout, blocklayout, threadlayout) moye_smem = MoYeSharedArray(eltype(dest), smemlayout) moye_dest = MoYeArray(dest) moye_src = MoYeArray(src) bM = size(blocklayout, 1) bN = size(blocklayout, 2) blocktile_dest = @tile moye_dest (bM, bN) (blockIdx().x, blockIdx().y) blocktile_src = @tile moye_src (bM, bN) (blockIdx().x, blockIdx().y) threadtile_dest = @parallelize blocktile_dest threadlayout threadIdx().x threadtile_src = @parallelize blocktile_src threadlayout threadIdx().x threadtile_smem = @parallelize moye_smem threadlayout threadIdx().x copyto!(threadtile_smem, threadtile_src) cp_async_wait() copyto!(threadtile_dest, threadtile_smem) return nothing end ``` ## Padding Shared Memory Note that in the above code, the layout of the shared memory is the same as the block layout. However, we often need to pad the shared array to avoid bank conflicts. We just need to change one line of code: ```julia smemlayout = @Layout (32, 32) (1, 33) # pad one row ``` Also note that our kernel will recompile for different static layout parameters. ## Transpose kernel Now we turn to the transpose kernel. ```julia function transpose_kernel(dest, src, smemlayout, blocklayout, threadlayout) moye_smem = MoYeSharedArray(eltype(dest), smemlayout) moye_src = MoYeArray(src) moye_dest = MoYeArray(dest) bM = size(blocklayout, 1) bN = size(blocklayout, 2) blocktile_src = @tile moye_src (bM, bN) (blockIdx().x, blockIdx().y) blocktile_dest = @tile moye_dest (bN, bM) (blockIdx().y, blockIdx().x) threadtile_dest = @parallelize blocktile_dest threadlayout threadIdx().x threadtile_src = @parallelize blocktile_src threadlayout threadIdx().x threadtile_smem = @parallelize moye_smem threadlayout threadIdx().x copyto!(threadtile_smem, threadtile_src) cp_async_wait() sync_threads() moye_smem′ = MoYe.transpose(moye_smem) threadtile_smem′ = @parallelize moye_smem′ threadlayout threadIdx().x copyto!(threadtile_dest, threadtile_smem′) return nothing end function test_transpose(M, N) a = CUDA.rand(Float32, M, N) b = CUDA.rand(Float32, N, M) blocklayout = @Layout (32, 32) smemlayout = @Layout (32, 32) (1, 33) threadlayout = @Layout (32, 8) bM = size(blocklayout, 1) bN = size(blocklayout, 2) blocks = (cld(M, bM), cld(N, bN)) threads = Int(size(threadlayout)) @cuda blocks=blocks threads=threads transpose_kernel(a, b, smemlayout, blocklayout, threadlayout) CUDA.synchronize() @test a == transpose(b) end test_transpose(2048, 2048) ``` It is almost identical to the copy kernel, but we need to transpose the shared memory by simply transposing its layout ```julia moye_smem′ = MoYe.transpose(moye_smem) ``` and then compute the new thread tiles. Note that each thread now works on different elements from the ones it loaded, so we need to call `sync_threads()`.
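The padding trick can also be justified with a little arithmetic (plain Julia, not MoYe API; it assumes the usual shared-memory organization of 32 banks of 4-byte words). After the transpose, a warp reads along a row of the shared tile, i.e. with a stride equal to the column length:

```julia
bank(j; col_stride) = mod((j - 1) * col_stride, 32)  # bank hit by the j-th element of a row

length(unique(bank.(1:32; col_stride = 32)))  # 1  -> all 32 accesses hit the same bank
length(unique(bank.(1:32; col_stride = 33)))  # 32 -> the padded layout spreads over all banks
```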
MoYe
https://github.com/YichengDWu/MoYe.jl.git
[ "MIT" ]
0.1.0
81fba898c4e80b8549f5348b74d08197a04afc85
code
629
using CBFV using Documenter makedocs(; modules=[CBFV], authors="Stefan Bringuier <[email protected]> and contributors", repo="https://github.com/JuliaMatSci/CBFV.jl/blob/{commit}{path}#L{line}", sitename="CBFV.jl", format=Documenter.HTML(; prettyurls=get(ENV, "CI", "false") == "true", canonical="https://JuliaMatSci.github.io/CBFV.jl", assets=String[], ), pages=[ "Intro" => "index.md", "Examples" => "examples.md", "API" => "api.md", ], ) Modules = [CBFV] deploydocs(; repo="github.com/JuliaMatSci/CBFV.jl", devbranch="master" )
CBFV
https://github.com/JuliaMatSci/CBFV.jl.git
[ "MIT" ]
0.1.0
81fba898c4e80b8549f5348b74d08197a04afc85
code
396
# see LICENSE module CBFV using ProgressBars using CSV using DataFrames export FileName export readdatabasefile export processelementdatabase export processinputdata export generatefeatures include("GlobalConst.jl") include("Types.jl") include("Errors.jl") include("Databases.jl") include("ParseFormula.jl") include("Composition.jl") include("ProcessData.jl") include("Featurization.jl") end
CBFV
https://github.com/JuliaMatSci/CBFV.jl.git
[ "MIT" ]
0.1.0
81fba898c4e80b8549f5348b74d08197a04afc85
code
1845
# see LICENSE """ elementalcomposition(formula;frmtarray=true) Construct an element count dictionary. # Arguments -`formula::String`: chemical formula -`frmtarray::Bool`: flag indicating type of return value # Returns -`amountelement::Dict{String,Float64} or Tuple{Array,Array}` """ function elementalcomposition(formula::String;frmtarray=true) elementmap = parseformula(formula) amountelement = Dict{String,Float64}() for (key,val) in elementmap if abs(val) ≥ 1.0e-4 amountelement[key] = val end end return frmtarray ? dicttoarray(amountelement) : amountelement end # function elementalcomposition """ fractionalcomposition(formula;frmtarray=true) Construct a composition fraction dictionary. # Arguments -`formula::String`: chemical formula -`frmtarray::Bool`: flag indicating type of return value # Returns -`compositionfrac::Dict{String,Float64} or Tuple{Array,Array}` """ function fractionalcomposition(formula::String;frmtarray=true) # for this part we need the function call with frmtarray=false amountelement = elementalcomposition(formula,frmtarray=false) natoms = sum(abs.(values(amountelement))) compositionfrac = Dict(key => amountelement[key]/natoms for key in keys(amountelement)) return frmtarray ? dicttoarray(compositionfrac) : compositionfrac end # function fractionalcomposition """ dicttoarray(dict) Convert a dictionary of Dict{String,T<:Number} to two arrays of keys and values. # Arguments -`dict::Dict{String,Number}`: dictionary to convert # Returns -`k::Array{Number}`: an array corresponding to the keys -`v::Array{Number}`: an array corresponding to the values """ function dicttoarray(dict::Dict{String,T}) where T<:Number k = [a for a in keys(dict)] v = [c for c in values(dict)] return k,v end # function dicttoarray
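# --- Illustrative usage sketch (added note; not part of the original file, mirrors the package tests) ---
# These helpers are internal (not exported), so qualify them with `CBFV.` at the REPL:
# CBFV.elementalcomposition("Li3Fe2[PO4]3", frmtarray=false)   # Dict("Li"=>3, "Fe"=>2, "P"=>3, "O"=>12)
# CBFV.fractionalcomposition("Y3N@C80", frmtarray=false)       # Dict("Y"=>3/84, "N"=>1/84, "C"=>80/84)
# With the default `frmtarray=true`, both functions instead return a `(keys, values)`
# pair of arrays produced by `dicttoarray`.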
CBFV
https://github.com/JuliaMatSci/CBFV.jl.git
[ "MIT" ]
0.1.0
81fba898c4e80b8549f5348b74d08197a04afc85
code
1694
# see LICENSE """ generate_available_databases() # Returns -`dictfiles::Dict{String,String}`: database name and filename+extension # TODO - This will eventually be replaced by using an `Artifacts.toml` that will provide the interface for getting the database. """ function generate_available_databases() path = joinpath(rootdir,"databases"); listfiles = split.(readdir(path),".") dictfiles = Dict(String(l[1])=>joinpath(path,l[1]*"."*l[2]) for l in listfiles) pop!(dictfiles,"README") return dictfiles end # function generate_available_databases show_available_databases() = show(keys(generate_available_databases())) """ readdatabasefile(pathtofile) Returns a DataFrame of an elemental database file in [databases/](databases/) # Arguments - `pathtofile::String`: path to the CSV formatted file to read - `stringtype::Type{Union{String,InlineString}}=String` : `CSV.jl` string storage type - `pool::Bool=false` : whether `CSV.File` will pool `String` column values. # Returns - `data::DataFrame`: the dataframe representation of the csv file. !!! note Some of the behaviors of `CSV.jl` will create data types that are inconsistent with several of the function argument types in `CBFV`. If you use this function to read the data files, the data frame constructed via CSV will work properly. """ function readdatabasefile(pathtofile::AbstractString; stringtype::Type{T}=String, pool=false) where T<:Union{String,InlineString} # Use CSV and dataframes data = CSV.File(pathtofile,stringtype=stringtype,pool=pool) |> DataFrame return data end # function readdatabasefile
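# --- Illustrative usage sketch (added note; not part of the original file) ---
# show_available_databases()                  # print the names of the bundled element databases
# paths = generate_available_databases()      # Dict of database name => full file path
# db    = readdatabasefile(paths["oliynyk"])  # DataFrame of the default (oliynyk) database
# Any external, CSV-formatted element database can be read the same way, e.g.
# readdatabasefile("path/to/my_elements.csv") with a path of your choosing (hypothetical here).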
CBFV
https://github.com/JuliaMatSci/CBFV.jl.git
[ "MIT" ]
0.1.0
81fba898c4e80b8549f5348b74d08197a04afc85
code
1490
# see LICENSE function checkifempty(dataframe::DataFrame) if isempty(dataframe) error("Dataframe is empty!") end return nothing end function checkdataframe(dataframe::DataFrame) if !hasproperty(dataframe,:formula) || !hasproperty(dataframe,:target) error("Dataframe is missing columns :formula or :target") end return nothing end function checkcombineallowed(dataframe::DataFrame) #extrprops = dataframe[!,Not([:formula,:target])] if !isempty(dataframe) return true else @warn "Combining of features requested but none exist, skipping!" return false end end function elementmissinginfo(element::String,formula::String) @info("The elemental database didn't contain $(element) for the \n input formula $(formula), so its values are being set to NaN.") end # function elementmissinginfo function elementwarn(element,formula;row=nothing) if row !== nothing @warn("$(element) in chemical formula $(formula) on row $(row) is not a valid symbol or is an unsupported element; this formula/entry is being skipped.") else @warn("$(element) in chemical formula $(formula) is not a valid symbol or is an unsupported element; this formula/entry is being skipped.") end return nothing end # function elementwarn function databaseerror(name::String) error("The database or file name $(name) could not be found or loaded") return nothing end # function databaseerror
CBFV
https://github.com/JuliaMatSci/CBFV.jl.git
[ "MIT" ]
0.1.0
81fba898c4e80b8549f5348b74d08197a04afc85
code
9645
# see LICENSE """ combinefeatures(features,extras) combinefeatures(features,featnames,extras) Combines existing features in the data with the prepared features. The three-argument method also returns the combined vector of feature column names. # Arguments - `features::AbstractArray`: Generated features of data - `extras::DataFrame`: The data frame representation of the original data. - `featnames::Vector`: The column names of the generated features. # Returns - `newfeatures::AbstractArray`: Combined features - `combfeatnames::Vector{String}`: Combined names of feature columns. """ function combinefeatures(features::AbstractArray, extras::DataFrame) if checkcombineallowed(extras) extrasarry = Array(extras) newfeatures = hcat(features, extrasarry) else newfeatures = features end return newfeatures end # function combinefeatures combinefeatures(features::AbstractArray, featnames::Vector, extras::DataFrame) = begin if checkcombineallowed(extras) extrasarry = Array(extras) newfeatures = hcat(features, extrasarry) combfeatnames = vcat(featnames, names(extras)) else newfeatures = features combfeatnames = featnames end return newfeatures,combfeatnames end """ assignfeatures(processeddata,formulae,sumfeatures) This is the primary function that assigns the features based on the CBFV approach. For more details it is best to see the original python CBFV and the references in the README file. # Arguments - `processeddata::Vector{Dict{Symbol,Any}}` : the formulas processed against the elemental database - `formulae::AbstractArray` : the formula string values; this should be some subtype of `Array{String,1}` - `sumfeatures::Bool=false` : whether to create a `sum_` feature vector # Returns - `featuresarry::Matrix{Float64}` : the feature matrix, one row per formula in the original data set. - `skippedformula::Vector{String}` : skipped formulas !!! note The `generatefeatures` call does not return the skipped formulas. """ function assignfeatures(processeddata::Vector{Dict{Symbol,Any}}, formulae::AbstractArray, sumfeatures::Bool=false) iterformulae = ProgressBar(1:length(formulae)) skippedformula = Array{String,1}() features = Vector{Matrix{Float64}}(undef, length(formulae)) Threads.@threads for i in iterformulae formula = formulae[i] amount = processeddata[i][:amount]::Vector{Float64} properties = processeddata[i][:eleprops]::Matrix{Float64} # Each formula has an n-element by m-feature matrix representation. # Construct all the feature vectors frange = [e[2] - e[1] for e in extrema(properties, dims=1)] fmax = maximum(properties, dims=1) fmin = minimum(properties, dims=1) _, fraccomp = fractionalcomposition(formula) favg = sum(fraccomp .* properties, dims=1) #FIX: Not sure what's going on here fdev = sum(fraccomp .* abs.(properties .- favg), dims=1) prominant = isapprox.(fraccomp, maximum(fraccomp)) fmode = minimum(properties[prominant, :], dims=1) fweight = sumfeatures ? sum(amount .* properties, dims=1) : amount .* properties if sumfeatures features[i] = hcat(fweight, favg, fdev, frange, fmax, fmin, fmode) else features[i] = hcat(favg, fdev, frange, fmax, fmin, fmode) end set_description(iterformulae, "Assigning features...") end featuresarry = reduce(vcat, features) return featuresarry, skippedformula end # function assignfeatures """ constructfeaturedataframe(featcolnames,features,extrafeatures,sumfeatures) Return a `DataFrame` given the features and their column names, optionally combining the extra features carried over from the input data and the summation statistics feature. 
The column name prefixes are fixed by the CBFV approach, which uses the statistical moments of the element features over each formula. # Arguments - `featcolnames::Vector{String}` : The name of the columns for the feature vectors - `features::Array{Float64,2}` : The feature vectors - `extrafeatures::Tuple{Bool,DataFrame}` : These are the features carried from the input data - `sumfeatures::Bool` : whether or not to add the sum statistics feature vector # Returns - `DataFrame` : the dataframe for the features """ function constructfeaturedataframe(featcolnames::Vector{String}, features::Array{Float64,2}, extrafeatures::Tuple{Bool,DataFrame}, sumfeatures::Bool) if sumfeatures colprefixes = ["sum_", "avg_", "dev_", "range_", "max_", "min_", "mode_"] else colprefixes = ["avg_", "dev_", "range_", "max_", "min_", "mode_"] end modcolnames = [] for p in colprefixes push!(modcolnames, fill(p, length(featcolnames)) .* featcolnames) end featnames = reduce(vcat, modcolnames) dictfeatnames = Dict{String,Vector}() if extrafeatures[1] combfeatures,combinedfeatnames = combinefeatures(features, featnames, extrafeatures[2]) for (i, n) in enumerate(combinedfeatnames) dictfeatnames[n] = combfeatures[:,i] end else for (i, n) in enumerate(featnames) dictfeatnames[n] = features[:, i] end end return DataFrame(dictfeatnames) end # function constructfeaturedataframe """ generatefeatures(data; elementdata,dropduplicate,combine,sumfeatures,returndataframe) generatefeatures(dataname; kwargs...) This is the primary function for generating the CBFV features for a dataset of formulas with or without existing features. This function will process the input data and grab the provided element database. The assigning of features is then executed based on the CBFV approach. If `returndataframe=true`, then a `DataFrame` data type is returned by this function with the added columns `:target` and `:formula`. !!! note I am not using `OrderedDict` so the column names will be arranged based on the native `Dict` ordering. # Arguments - `data::DataFrame`: The data set that you want to be featurized. - `elementdata::Union{String,FileName} or Union{String,DataFrame}`: The name of the internal database or the file path and name to an external database. - `dropduplicate::Bool=true`: Option to drop duplicate entries. - `combine::Bool=false`: Option to combine existing features in `data` with the generated feature set. - `sumfeatures::Bool=false`: Option to include the `sum_` feature columns. - `returndataframe::Bool=true`: Option to return a `DataFrame`. Will include `:target` and `:formula` columns. 
# Returns - `generatedataframe::DataFrame` - `formulae::Vector{String}, features::Array{Number,2}, targets::Vector{Number}` The following featurization schemes are included within CBFV.jl: - `oliynyk` (default) - `magpie` - `mat2vec` - `jarvis` - `onehot` - `random_200` ```@example using DataFrames using CBFV d = DataFrame(:formula=>["Tc1V1","Cu1Dy1","Cd3N2"],:target=>[248.539,66.8444,91.5034]) generatefeatures(d) ``` """ function generatefeatures(data::DataFrame, elementdata::Union{String,DataFrame}="oliynyk"; dropduplicate=true, combine=false, sumfeatures=false, returndataframe=true) # Remove duplicate entries if dropduplicate moddata = unique(data) else moddata = data end # Process input data checkdataframe(data) formulae = moddata[!, :formula] featcolnames, processeddata = processinputdata(moddata, elementdata) targets = [row[:target] for row in processeddata] # Featurization features, skippedformulas = assignfeatures(processeddata, formulae, sumfeatures) # Extra features from original data extrafeatures = moddata[!, Not([:formula, :target])] if combine checkifempty(extrafeatures) end if returndataframe generatedataframe = constructfeaturedataframe(featcolnames, features, (combine, extrafeatures), sumfeatures) generatedataframe[!, :formula] = formulae generatedataframe[!, :target] = targets return generatedataframe else if combine combinefeatures(features, extrafeatures) end return formulae, features, targets end end # function generatefeaturesdata generatefeatures(data::DataFrame; elementdata::Union{FileName,String}="oliynyk", dropduplicate=true, combine=false, sumfeatures=false, returndataframe=true) = begin if typeof(elementdata) == FileName elementdataframe = readdatabasefile(elementdata.fullpath) generatefeatures(data,elementdataframe, dropduplicate=dropduplicate, combine=combine, sumfeatures=sumfeatures, returndataframe=returndataframe) else generatefeatures(data,elementdata, dropduplicate=dropduplicate, combine=combine, sumfeatures=sumfeatures, returndataframe=returndataframe) end end generatefeatures(dataname::String; kwargs...) = begin # Digest data file before processing data = readdatabasefile(dataname)::DataFrame generatefeatures(data; kwargs...) end
CBFV
https://github.com/JuliaMatSci/CBFV.jl.git
[ "MIT" ]
0.1.0
81fba898c4e80b8549f5348b74d08197a04afc85
code
717
# see LICENSE global const rootsrc = @__DIR__ global const rootdir = joinpath(@__DIR__,"../") global const allowedperiodictable = ( "H","He","Li","B","Be","C","N","O","F","Ne", "Na","Mg","Al","Si","P","S","Cl","Ar", "K","Ca","Sc","Ti","V","Cr","Mn","Fe","Co","Ni","Cu","Zn","Ga","Ge","As","Se","Br","Kr", "Rb","Sr","Y","Zr","Nb","Mo","Tc","Ru","Rh","Pd","Ag","Cd","In","Sn","Sb","Te","I","Xe", "Cs","Ba","La","Ce","Pr","Nd","Pm","Sm","Eu","Gd","Tb","Dy","Ho","Er","Tm","Yb","Lu","Hf","Ta","W","Re","Os","Ir","Pt","Au","Hg","Tl","Pb","Bi","Po","At","Rn", "Fr","Ra","Ac","Th","Pa","U","Np","Pu","Am","Cm","Bk","Cf","Es","Fm","Md","No","Lr","Rf","Db","Sg","Bh","Hs","Mt","Ds","Rg","Cn","Nh","Fl","Mc","Lv","Ts","Og" )
CBFV
https://github.com/JuliaMatSci/CBFV.jl.git
[ "MIT" ]
0.1.0
81fba898c4e80b8549f5348b74d08197a04afc85
code
4772
# see LICENSE """ stripamt(element::String) Strip the amount associated with an element. !!! note Doesn't handle fractional amounts. """ stripamt(element::String) = begin charints = ['0','1','2','3','4','5','6','7','8','9'] strip(element,charints) end """ splitcap(formula::String) Split a formula by capital letters. """ splitcap(formula::String) = begin regex = r"(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z][0-9])" split(formula,regex) end """ replacechar(formula) Remove and replace special characters in a formula. The function assumes default swap keys `@`, `[`, and `]`; others can be added via the keyword argument. # Arguments - `formula::String`: the chemical formula, e.g. CO2, H2O - `addswapkeys::Array{Pair{String,String}}=[Pair("","")]`: additional characters to swap. # Returns - `formula::String` """ function replacechar(formula::String;addswapkeys=[Pair("","")]) swapkeys = [Pair("@",""),Pair("[","("),Pair("]",")")]; if addswapkeys != [Pair("","")] for addkey in addswapkeys push!(swapkeys,addkey) end end modformula = formula for key in swapkeys modformula = replace(modformula,key) end return modformula end # function replacechar """ setdefaultdict(formula) Initializes the default dictionary of type Dict{String,Float64}. Each element is set to 0. """ function setdefaultdict(formula::String) defaultdict = Dict{String,Float64}() elements = findall(r"([A-Z][a-z]*)",formula) for e in elements defaultdict[formula[e]] = 0.00e0 end return defaultdict end # function setdefaultdict """ getrepresentation(formulaunit;molfactor=1) Return the formula elemental make-up in terms of multiples of each element in the chemical formula. Amounts due to molecular complexes in the chemical formula (e.g., Li3Fe2(PO4)3) are handled with the keyword argument # Arguments -`formulaunit::String`: the chemical formula for provided unit, e.g. CO2, H2O -`molfactor::Integer`: the repeating occurrence of molecular complexes, e.g., XX(PO)3 # Returns -`elementalamount::Dict{String,Float64}` """ function getrepresentation(formulaunit::String;molfactor=1.00e0) elementalamount = setdefaultdict(formulaunit); #Assign amount to each element in formula elementgroups = findall(r"([A-Z][a-z]*)\s*([-*\.\d]*)",formulaunit); for eg in elementgroups el,amt = map(String,match(r"([A-Z][a-z]?)([-*\.\d]*)?",formulaunit[eg]).captures) famt = isempty(amt) ? 1.00e0 : parse(Float64,amt) if el ∈ allowedperiodictable elementalamount[el] += famt*molfactor else elementwarn(el,formulaunit) end end return elementalamount end # function getrepresentation """ rewriteformula(formula::String) If formula contains molecular units in the form of AB(CD3)2, rewrite as ABC2D6. # Arguments - `formula::String`: A chemical formula such as Li3Fe2(PO4)3 to rewrite # Returns - `modformula::String`: the rewritten chemical formula such as Li3Fe2P3.0O12.0. """ function rewriteformula(formula::String) modformula = formula molecularunits = eachmatch(r"\(([^\(\)]+)\)\s*([\.\d]*)",formula) for molunit in molecularunits molecule,repeat = map(String,molunit.captures) frepeat = isempty(repeat) ? 1.00e0 : parse(Float64,repeat) elementgroups = findall(r"([A-Z][a-z]*)\s*([-*\.\d]*)",molecule); molrewrite = "" for eg in elementgroups element,amount = map(String,match(r"([A-Z][a-z]?)(\d*\d?)",molecule[eg]).captures) famount = isempty(amount) ? 
1.00e0 : parse(Float64,amount) famount *= frepeat molrewrite *= "$(element)$(famount)" end modformula = replace(modformula,molunit.match => molrewrite) end return modformula end #function rewriteformula """ parseformula(formula::String) Creates a dictionary of elements and stoichiometry for compound formula. If formula is written with molecular groupings (e.g., Li3Fe2(PO4)3), then rewrite string. # Arguments -`formula::String`: the chemical formula, e.g., Li3Fe2(PO4)3 # Returns -`Dict{String,Int}`: returns the function call which produces a composition dictionary """ function parseformula(formula::String) modformula = replacechar(formula) :: String #Check if formula match of type AB(CD)3 molecularunits = match(r"\(([^\(\)]+)\)\s*([\.\d]*)",modformula) if molecularunits ≠ nothing modformula = rewriteformula(modformula) end formuladict = getrepresentation(modformula) return formuladict end # function parseformula parseformula(formula::Symbol) = parseformula(String(formula))
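# --- Illustrative usage sketch (added note; not part of the original file, mirrors the package tests) ---
# CBFV.parseformula("Li3Fe2[PO4]3")    # Dict("Li"=>3, "Fe"=>2, "P"=>3, "O"=>12)
# CBFV.parseformula("Y3N@C80")         # Dict("Y"=>3, "N"=>1, "C"=>80)
# CBFV.rewriteformula("Li3Fe2(PO4)3")  # "Li3Fe2P3.0O12.0"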
CBFV
https://github.com/JuliaMatSci/CBFV.jl.git
[ "MIT" ]
0.1.0
81fba898c4e80b8549f5348b74d08197a04afc85
code
7027
# see LICENSE """ replacemissing!(data) Replace missing values in columns of a dataframe with the average value of that column. # Arguments - `data::DataFrame`: data frame with missing values. # Modifies - `data::DataFrame`: changes `missing` values to the mean value of the column. """ function replacemissing!(data::DataFrame) columnnames = names(data[!, Not(:element)]) statdata = describe(data); for (i,n) in enumerate(columnnames) data[!,n] = coalesce.(data[!,n],statdata[i,:mean]) end end """ removeunsupported!(datainput) Handle cases where a compound can't be processed because it contains an element symbol that isn't allowed. # Arguments - `datainput::DataFrame`: data frame representation of input data set. # Modifies - `datainput`: removes unsupported items. """ function removeunsupported!(datainput::DataFrame) formulas = copy(datainput[!, :formula]) rows = range(1, nrow(datainput),step=1) splitformulas = keys.(getrepresentation.(formulas)) for i = 1:length(formulas) for el in splitformulas[i] if stripamt(el) ∉ allowedperiodictable # modify so that only those rows not equal are kept. elementwarn(el, formulas[i], row=rows[i]) filter!(row -> row.formula != formulas[i], datainput) end end end end # function removeunsupported! """ extractproperties(elements,properties,formulaelements,formula) Returns an array of properties for the elements that are in a formula. # Arguments - `elements::Vector{String}`: supported elements from the elemental database. - `properties::AbstractArray`: array formatted database element properties - `formulaelements::Array{String,1}`: elements from a formula in array format. - `formula::String`: the formula name. # Returns - `extractedproperties::Array{Float64,2}`: parsed/selected element properties from the element properties array. """ function extractproperties(elements::Vector{String}, properties::Array{T,2}, formulaelements::Array{String,1}, formula::String) where T<:Number _, m = size(properties) l = length(formulaelements) extractedproperties = Array{Float64,2}(undef, l, m) for (i, e) in enumerate(formulaelements) if stripamt(e) ∉ elements elementmissinginfo(e, formula) extractedproperties[i, :] = fill(NaN, m) else coordinate = findfirst(x -> x == e, elements) extractedproperties[i, :] = properties[coordinate, :] end end return extractedproperties end # function extractproperties """ getelementpropertydatabase(databasename) Reads an elemental database file given its name. # Arguments - `databasename::String="oliynyk"`: name of internally available database file. See [databases](CBFV/databases) # Returns - `database::DataFrame` """ function getelementpropertydatabase(databasename::String="oliynyk") #Check if databasename in database folder using function call databases = generate_available_databases()::Dict{String,String} if databasename ∈ keys(databases) database = readdatabasefile(databases[databasename]) else databaseerror(databasename) end return database::DataFrame end # function getelementpropertydatabase """ processelementdatabase(data) Takes the element feature dataframe and processes it to return a dictionary with values of type `Array{String,N}` and an Array representation of the entire database. 
# Arguments - `data::DataFrame`: element feature dataframe from database file # Returns - `elementproperties::Dict{Symbol,Array{String,N}}` : dictionary with keys `:symbols`,`:index`, and `:missing` which return Array{String,N} values for the dataframe - `arrayrepresentation::Array{Any,2}`: representation of the dataframe """ function processelementdatabase(data::DataFrame;replacemissing=true) if replacemissing replacemissing!(data) end elementsymbols = convert(Vector{String}, data[!, :element]) elementindex = collect(1:nrow(data)) elementmissing = collect(setdiff( Set(allowedperiodictable), Set(elementsymbols) ))::Array{String,1} elementinfo = Dict(:symbols => elementsymbols, :index => elementindex, :missing => elementmissing) arrayrepresentation = Tables.matrix(data[!, Not(:element)]) columnnames = names(data[!, Not(:element)]) return elementinfo, columnnames, arrayrepresentation end # function processelementdatabase processelementdatabase(databasename::String;kwargs...) = begin data = getelementpropertydatabase(databasename) processelementdatabase(data,kwargs...) end processelementdatabase(databasepath::FileName;kwargs...) = begin data = readdatabasefile(databasepath.fullpath) processelementdatabase(data,kwargs...) end """ processinputdata(datainput,elementdatabase) Take the data set that contains the formula's, target values, and additional features and then extract the elemental properties from the element database provided. Also get the column/feature used in the element properties. # Arguments - `datainput::DataFrame`: data containing columns `:formula` and `:target`. - `elementfeatures::Array{Number,2}`: element feature set based on database # Returns - `elpropnames::Array{String,1}`: The names of the properties in elemental database - `processeddata::Vector{Dict{Symbol,Any}}`: The processed input data based on elemental database. """ function processinputdata(datainput::DataFrame, elementdatabase::DataFrame) elementinfo, elpropnames, elementsproperties = processelementdatabase(elementdatabase) checkdataframe(datainput) removeunsupported!(datainput) n, _ = size(datainput) processeddata = Vector{Dict{Symbol,Any}}(undef, n) #CHECK: Is this a performant iterator table = Tables.namedtupleiterator(datainput[!, [:formula, :target]]) i = 1 itertable = ProgressBar(table) for row in itertable formula, target = row[1], row[2] elements, amount = elementalcomposition(formula) extractedproperties = extractproperties(elementinfo[:symbols], elementsproperties, elements, formula) processeddata[i] = Dict(:elements => elements, :amount => amount, :eleprops => extractedproperties, :target => target) i += 1 set_description(itertable, "Preparing input data...") end return elpropnames, processeddata end # function processinputdata processinputdata(datainput::DataFrame, elementdatabasename::String) = begin eledb = getelementpropertydatabase(elementdatabasename) processinputdata(datainput, eledb) end processinputdata(datainput::DataFrame) = begin eledb = getelementpropertydatabase("oliynyk") processinputdata(datainput, eledb) end
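# --- Illustrative usage sketch (added note; not part of the original file, mirrors the package tests) ---
# Assuming a small, fictitious element database and input data frame:
# eledb = DataFrame(:element => ["Pr","Ni"], :C0 => [58.7, 257.5], :C1 => [25.6, 171.8])
# input = DataFrame(:formula => ["PrNi2","PrNi"], :target => [1000.0, 1200.0])
# colnames, processed = processinputdata(input, eledb)
# processed[1][:elements]  # ["Pr", "Ni"]
# processed[1][:amount]    # [1.0, 2.0]
# processed[1][:eleprops]  # 2 x 2 matrix of the element properties for Pr and Ni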
CBFV
https://github.com/JuliaMatSci/CBFV.jl.git
[ "MIT" ]
0.1.0
81fba898c4e80b8549f5348b74d08197a04afc85
code
150
# see LICENSE """ FileName(fullpath) Datatype for multiple dispatch. Allows an external element database file to be passed to functions such as `generatefeatures`. """ struct FileName fullpath::String end
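# --- Illustrative usage sketch (added note; not part of the original file) ---
# Wrapping a path in `FileName` dispatches `generatefeatures` (and `processelementdatabase`)
# to read an external element database file instead of a bundled one, e.g.
# generatefeatures(data, elementdata=FileName("path/to/custom_elements.csv"))
# where `data` and the path are placeholders for your own inputs.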
CBFV
https://github.com/JuliaMatSci/CBFV.jl.git
[ "MIT" ]
0.1.0
81fba898c4e80b8549f5348b74d08197a04afc85
code
5008
using CBFV using Test using CSV, DataFrames @testset "CBFV.jl" begin @testset "Types.jl constructors" begin begin afile = CBFV.FileName("test/file.txt") @test afile.fullpath == "test/file.txt" end end # Types.jl tesset @testset "ProcessFormulaInput.jl functions" begin @test CBFV.replacechar("Y3N@C80") == "Y3NC80" @test CBFV.replacechar("Li3Fe2[PO4]3") == "Li3Fe2(PO4)3" @test CBFV.replacechar("SiO_2",addswapkeys=[Pair("_","")]) == "SiO2" @test CBFV.setdefaultdict("SiO2") == Dict("Si"=>0,"O"=>0) @test CBFV.getrepresentation("PO4",molfactor=3) == Dict("P"=>3,"O"=>12) @test CBFV.getrepresentation("SiO2") == Dict("Si"=>1,"O"=>2) @test CBFV.rewriteformula("Li3Fe2(PO4)3") == "Li3Fe2P3.0O12.0" @test CBFV.rewriteformula("Si(OH)4(CO2)3") == "SiO4.0H4.0C3.0O6.0" begin correct = Dict("Li"=>3,"Fe"=>2,"P"=>3,"O"=>12); output = CBFV.parseformula("Li3Fe2[PO4]3") @test output == correct end begin correct = Dict("Y"=>3,"N"=>1,"C"=>80); output = CBFV.parseformula("Y3N@C80") @test output == correct end begin correct = Dict("H"=>4,"O"=>2); output = CBFV.parseformula("(H2O)(H2O)") @test output == correct end correct = Dict("H"=>2,"O"=>1); output = CBFV.parseformula("(H2O)0.5(H2O)0.5") @test output == correct end # ProcessFormulaInput.jl testset @testset "Composition.jl functions" begin begin correct = Dict("Li"=>3,"Fe"=>2,"P"=>3,"O"=>12); output = CBFV.elementalcomposition("Li3Fe2[PO4]3",frmtarray=false) @test output == correct end begin correct = Dict("Y"=>3/84,"N"=>1/84,"C"=>80/84) output = CBFV.fractionalcomposition("Y3N@C80",frmtarray=false) @test output == correct end begin correct = (["Y","C","N"],[3/84 , 80/84 , 1/84]) output = CBFV.fractionalcomposition("Y3N@C80") @test correct == output end end # Composition.jl testset @testset "Databases.jl functions" begin @test typeof(CBFV.generate_available_databases()) == Dict{String,String} # @test CBFV.show_available_databases() == nothing end # Databases.jl testset @testset "ProcessData.jl functions" begin @test typeof(CBFV.getelementpropertydatabase()) == DataFrame begin eletest = DataFrame(:element => ["Pr","Ni","Ru"], :C0 => [58.7, 257.5, 562.1], :C1 => [25.6, 171.8, 183.8], :C2 => [0.0, 0.0, 0.0], :C3 => [0.0, 0.0, 0.0]) # NOTE: Ficticious materials inputtest = DataFrame(:formula=>["PrNi2","PrNi","PrRuNi3"],:target=>[1000.0,1200.0,2400.0]) colnames,processdata = CBFV.processinputdata(inputtest,eletest) @test typeof(colnames) == Vector{String} @test processdata[1][:elements] == ["Pr","Ni"] @test processdata[1][:amount] == [1.0,2.0] @test processdata[2][:eleprops] == [58.7 25.6 0.0 0.0; 257.5 171.8 0.0 0.0] @test processdata[2][:target] == 1200.0 @test processdata[3][:elements] == ["Pr","Ni","Ru"] @test processdata[3][:amount] == [1.0,3.0,1.0] inputtest = DataFrame(:formula=>["PrX2","PrNi","PrRuQ3"],:target=>[1000.0,1200.0,2400.0]) eleinfo,processdata = CBFV.processinputdata(inputtest,eletest) @test length(processdata) == 1 @test processdata[1][:elements] == ["Pr","Ni"] @test processdata[1][:amount] == [1.0,1.0] @test !isnothing(CBFV.processinputdata(inputtest)) end end # ProcessData.jl testset @testset "Featurization.jl functions" begin d = DataFrame(:formula=>["Tc1V1","Cu1Dy1","Cd3N2"], :property=>[1.0,0.5,1.0], :target=>[248.539,66.8444,91.5034]) featdb = CBFV.generatefeatures(d,returndataframe=true) tmpfile = tempname() CSV.write(tmpfile,d) @test featdb == CBFV.generatefeatures(tmpfile,returndataframe=true) @test CBFV.generatefeatures(d,combine=true)[!,:property] == d[!,:property] testdb = CSV.File("pycbfv_test_data.csv") |> DataFrame @test 
length(names(featdb[!,Not([:target,:formula])])) == length(names(testdb)) @testset "Column $n" for n in names(testdb) @test testdb[!,n] ≈ featdb[!,n] end featdb_ext = CBFV.generatefeatures(d, elementdata=CBFV.FileName((@__DIR__)*"/../databases/oliynyk.csv")) @test featdb_ext == featdb end # Featurization.jl testset end
CBFV
https://github.com/JuliaMatSci/CBFV.jl.git
[ "MIT" ]
0.1.0
81fba898c4e80b8549f5348b74d08197a04afc85
docs
4817
# CBFV.jl : A simple composition-based feature vectorization utility in Julia [![Stable](https://img.shields.io/badge/docs-stable-blue.svg)](https://juliamatsci.github.io/CBFV.jl/stable) [![Dev](https://img.shields.io/badge/docs-dev-blue.svg)](https://juliamatsci.github.io/CBFV.jl/dev) [![Build Status](https://github.com/juliamatsci/CBFV.jl/workflows/CI/badge.svg)](https://github.com/JuliaMatSci/CBFV.jl/actions)[![Coverage](https://codecov.io/gh/JuliaMatSci/CBFV.jl/branch/master/graph/badge.svg)](https://codecov.io/gh/JuliaMatSci/CBFV.jl) This is a Julia rewrite of the [python tool](https://github.com/kaaiian/CBFV) to create a composition-based feature vector representation for machine learning with materials science data. The ideas and methodology are discussed in the recent article: >Wang, Anthony Yu-Tung; Murdock, Ryan J.; Kauwe, Steven K.; Oliynyk, Anton O.; Gurlo, Aleksander; Brgoch, Jakoah; Persson, Kristin A.; Sparks, Taylor D., [Machine Learning for Materials Scientists: An Introductory Guide toward Best Practices](https://doi.org/10.1021/acs.chemmater.0c01907), *Chemistry of Materials* **2020**, *32 (12)*: 4954–4965. DOI: [10.1021/acs.chemmater.0c01907](https://doi.org/10.1021/acs.chemmater.0c01907). and the original python source code(s) can be found here: - [https://github.com/anthony-wang/BestPractices/tree/master/notebooks/CBFV](https://github.com/anthony-wang/BestPractices/tree/master/notebooks/CBFV) - [https://github.com/kaaiian/CBFV](https://github.com/kaaiian/CBFV) ## Example Use The input data set should have at least two columns with the header/names `formula` and `target`. ```julia using DataFrames using CBFV data = DataFrame("name"=>["Rb2Te","CdCl2","LaN"],"bandgap_eV"=>[1.88,3.51,1.12]) rename!(data,Dict("name"=>"formula","bandgap_eV"=>"target")) features = generatefeatures(data) ``` Note that you will most likely still want to post-process the generated feature data with some transformation that scales it. The [StatsBase.jl](https://juliastats.org/StatsBase.jl/stable/transformations/) package provides some basic features for this, although the input needs to be `AbstractMatrix{<:Real}` rather than a `DataFrame`. This can be achieved using `generatefeatures(data,returndataframe=false)`; see the sketch at the end of this README. ## Supported Featurization Schemes As with the original CBFV python package the following element databases are available: - `oliynyk` (default): Database from A. Oliynyk. - `magpie`: [Materials Agnostic Platform for Informatics and Exploration](https://bitbucket.org/wolverton/magpie/src/master/) - `mat2vec`: [Word embeddings capture latent knowledge from materials science](https://github.com/materialsintelligence/mat2vec) - `jarvis`: [Joint Automated Repository for Various Integrated Simulations provided by the U.S. National Institute of Standards and Technology.](https://jarvis.nist.gov/) - `onehot`: Simple one hot encoding scheme, i.e., diagonal elemental matrix. - `random_200`: 200 random elemental properties (I'm assuming). However, `CBFV.jl` allows you to provide your own element database to featurize with. Also, the current implementation reads the saved `.csv` files in [`databases`](@ref); however, this is prone to potential issues (e.g., out-of-date files). To alleviate this I will change the implementation to utilize `Pkg.Artifacts` with an `Artifacts.toml` file that enables grabbing the needed data files from a server if they don't already exist locally. ### Julia Dependencies This is a relatively small package so there aren't a lot of dependencies. 
The required packages are: - CSV - DataFrames - ProgressBars ## Citations Please cite the following if you use this package in your work: ```bibtex @misc{CBFV.jl, author = {Bringuier, Stefan}, year = {2021}, title = {CBFV.jl - A simple composition based feature vectorization Julia utility}, url = {https://github.com/JuliaMatSci/CBFV.jl}, } ``` In addition, please also consider citing the original python implementation and tutorial paper. ```bibtex @misc{CBFV, author = {Kauwe, Steven and Wang, Anthony Yu-Tung and Falkowski, Andrew}, title = {CBFV: Composition-based feature vectors}, url = {https://github.com/kaaiian/CBFV} } ``` ```bibtex @article{Wang2020bestpractices, author = {Wang, Anthony Yu-Tung and Murdock, Ryan J. and Kauwe, Steven K. and Oliynyk, Anton O. and Gurlo, Aleksander and Brgoch, Jakoah and Persson, Kristin A. and Sparks, Taylor D.}, year = {2020}, title = {Machine Learning for Materials Scientists: An Introductory Guide toward Best Practices}, url = {https://doi.org/10.1021/acs.chemmater.0c01907}, pages = {4954--4965}, volume = {32}, number = {12}, issn = {0897-4756}, journal = {Chemistry of Materials}, doi = {10.1021/acs.chemmater.0c01907} } ```
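As mentioned in the Example Use section above, the feature matrix returned by `generatefeatures(data, returndataframe=false)` can be scaled with [StatsBase.jl](https://juliastats.org/StatsBase.jl/stable/transformations/). Here is a minimal sketch (assuming StatsBase is installed and `data` is built as in that section):

```julia
using CBFV, StatsBase

formulae, features, targets = generatefeatures(data, returndataframe=false)

# Standardize each feature column (rows are samples) to zero mean and unit variance
zt = fit(ZScoreTransform, features, dims=1)
scaled_features = StatsBase.transform(zt, features)
```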
CBFV
https://github.com/JuliaMatSci/CBFV.jl.git