repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string)
---|---|---|---|---|---|---
Crystal_Ball_Nav | Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_BASIC_PRECONDITIONERS_H
#define EIGEN_BASIC_PRECONDITIONERS_H
namespace Eigen {
/** \ingroup IterativeLinearSolvers_Module
* \brief A preconditioner based on the diagonal entries
*
* This class allows one to approximately solve A.x = b problems assuming A is a diagonal matrix.
* In other words, this preconditioner neglects all off-diagonal entries and, in Eigen's language, solves for:
\code
A.diagonal().asDiagonal() . x = b
\endcode
*
* \tparam _Scalar the type of the scalar.
*
* \implsparsesolverconcept
*
* This preconditioner is suitable for both selfadjoint and general problems.
* The diagonal entries are pre-inverted and stored into a dense vector.
*
* \note A variant that has yet to be implemented would attempt to preserve the norm of each column.
*
* \sa class LeastSquareDiagonalPreconditioner, class ConjugateGradient
*/
template <typename _Scalar>
class DiagonalPreconditioner
{
typedef _Scalar Scalar;
typedef Matrix<Scalar,Dynamic,1> Vector;
public:
typedef typename Vector::StorageIndex StorageIndex;
enum {
ColsAtCompileTime = Dynamic,
MaxColsAtCompileTime = Dynamic
};
DiagonalPreconditioner() : m_isInitialized(false) {}
template<typename MatType>
explicit DiagonalPreconditioner(const MatType& mat) : m_invdiag(mat.cols())
{
compute(mat);
}
Index rows() const { return m_invdiag.size(); }
Index cols() const { return m_invdiag.size(); }
template<typename MatType>
DiagonalPreconditioner& analyzePattern(const MatType& )
{
return *this;
}
template<typename MatType>
DiagonalPreconditioner& factorize(const MatType& mat)
{
m_invdiag.resize(mat.cols());
for(int j=0; j<mat.outerSize(); ++j)
{
typename MatType::InnerIterator it(mat,j);
while(it && it.index()!=j) ++it;
if(it && it.index()==j && it.value()!=Scalar(0))
m_invdiag(j) = Scalar(1)/it.value();
else
m_invdiag(j) = Scalar(1);
}
m_isInitialized = true;
return *this;
}
template<typename MatType>
DiagonalPreconditioner& compute(const MatType& mat)
{
return factorize(mat);
}
/** \internal */
template<typename Rhs, typename Dest>
void _solve_impl(const Rhs& b, Dest& x) const
{
x = m_invdiag.array() * b.array() ;
}
template<typename Rhs> inline const Solve<DiagonalPreconditioner, Rhs>
solve(const MatrixBase<Rhs>& b) const
{
eigen_assert(m_isInitialized && "DiagonalPreconditioner is not initialized.");
eigen_assert(m_invdiag.size()==b.rows()
&& "DiagonalPreconditioner::solve(): invalid number of rows of the right hand side matrix b");
return Solve<DiagonalPreconditioner, Rhs>(*this, b.derived());
}
ComputationInfo info() { return Success; }
protected:
Vector m_invdiag;
bool m_isInitialized;
};
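// A minimal usage sketch: DiagonalPreconditioner is normally selected through the
// preconditioner template argument of an iterative solver. Here A is assumed to be a
// filled, square SparseMatrix<double> with a nonzero diagonal and b a matching vector:
//
//   Eigen::ConjugateGradient<Eigen::SparseMatrix<double>,
//                            Eigen::Lower|Eigen::Upper,
//                            Eigen::DiagonalPreconditioner<double> > cg;
//   cg.compute(A);                   // factorize() pre-inverts the diagonal of A
//   Eigen::VectorXd x = cg.solve(b); // approximately solves A x = b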
/** \ingroup IterativeLinearSolvers_Module
* \brief Jacobi preconditioner for LeastSquaresConjugateGradient
*
* This class allows one to approximately solve A' A x = A' b problems assuming A' A is a diagonal matrix.
* In other words, this preconditioner neglects all off-diagonal entries and, in Eigen's language, solves for:
\code
(A.adjoint() * A).diagonal().asDiagonal() * x = b
\endcode
*
* \tparam _Scalar the type of the scalar.
*
* \implsparsesolverconcept
*
* The diagonal entries are pre-inverted and stored into a dense vector.
*
* \sa class LeastSquaresConjugateGradient, class DiagonalPreconditioner
*/
template <typename _Scalar>
class LeastSquareDiagonalPreconditioner : public DiagonalPreconditioner<_Scalar>
{
typedef _Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef DiagonalPreconditioner<_Scalar> Base;
using Base::m_invdiag;
public:
LeastSquareDiagonalPreconditioner() : Base() {}
template<typename MatType>
explicit LeastSquareDiagonalPreconditioner(const MatType& mat) : Base()
{
compute(mat);
}
template<typename MatType>
LeastSquareDiagonalPreconditioner& analyzePattern(const MatType& )
{
return *this;
}
template<typename MatType>
LeastSquareDiagonalPreconditioner& factorize(const MatType& mat)
{
// Compute the inverse squared-norm of each column of mat
m_invdiag.resize(mat.cols());
if(MatType::IsRowMajor)
{
m_invdiag.setZero();
for(Index j=0; j<mat.outerSize(); ++j)
{
for(typename MatType::InnerIterator it(mat,j); it; ++it)
m_invdiag(it.index()) += numext::abs2(it.value());
}
for(Index j=0; j<mat.cols(); ++j)
if(numext::real(m_invdiag(j))>RealScalar(0))
m_invdiag(j) = RealScalar(1)/numext::real(m_invdiag(j));
}
else
{
for(Index j=0; j<mat.outerSize(); ++j)
{
RealScalar sum = mat.col(j).squaredNorm();
if(sum>RealScalar(0))
m_invdiag(j) = RealScalar(1)/sum;
else
m_invdiag(j) = RealScalar(1);
}
}
Base::m_isInitialized = true;
return *this;
}
template<typename MatType>
LeastSquareDiagonalPreconditioner& compute(const MatType& mat)
{
return factorize(mat);
}
ComputationInfo info() { return Success; }
protected:
};
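// A minimal usage sketch: this preconditioner is the default of
// LeastSquaresConjugateGradient, so naming it explicitly is optional. A is assumed to
// be a filled m x n SparseMatrix<double> and b a vector of size m:
//
//   Eigen::LeastSquaresConjugateGradient<
//       Eigen::SparseMatrix<double>,
//       Eigen::LeastSquareDiagonalPreconditioner<double> > lscg;
//   lscg.compute(A);
//   Eigen::VectorXd x = lscg.solve(b); // approximately minimizes |A x - b|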
/** \ingroup IterativeLinearSolvers_Module
* \brief A naive preconditioner which approximates any matrix as the identity matrix
*
* \implsparsesolverconcept
*
* \sa class DiagonalPreconditioner
*/
class IdentityPreconditioner
{
public:
IdentityPreconditioner() {}
template<typename MatrixType>
explicit IdentityPreconditioner(const MatrixType& ) {}
template<typename MatrixType>
IdentityPreconditioner& analyzePattern(const MatrixType& ) { return *this; }
template<typename MatrixType>
IdentityPreconditioner& factorize(const MatrixType& ) { return *this; }
template<typename MatrixType>
IdentityPreconditioner& compute(const MatrixType& ) { return *this; }
template<typename Rhs>
inline const Rhs& solve(const Rhs& b) const { return b; }
ComputationInfo info() { return Success; }
};
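// A minimal usage sketch: passing IdentityPreconditioner effectively disables
// preconditioning, which can be handy for benchmarking. A and b are assumed to be a
// filled square SparseMatrix<double> and a matching right-hand side:
//
//   Eigen::BiCGSTAB<Eigen::SparseMatrix<double>,
//                   Eigen::IdentityPreconditioner> solver;
//   solver.compute(A);
//   Eigen::VectorXd x = solver.solve(b);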
} // end namespace Eigen
#endif // EIGEN_BASIC_PRECONDITIONERS_H
| 6,755 | 28.762115 | 111 | h |
Crystal_Ball_Nav | Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011-2014 Gael Guennebaud <[email protected]>
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_BICGSTAB_H
#define EIGEN_BICGSTAB_H
namespace Eigen {
namespace internal {
/** \internal Low-level bi-conjugate gradient stabilized algorithm
* \param mat The matrix A
* \param rhs The right hand side vector b
* \param x On input an initial solution, on output the computed solution.
* \param precond A preconditioner being able to efficiently solve for an
* approximation of Ax=b (regardless of b)
* \param iters On input the max number of iterations, on output the number of performed iterations.
* \param tol_error On input the tolerance error, on output an estimation of the relative error.
* \return false in the case of numerical issue, for example a breakdown of BiCGSTAB.
*/
template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
const Preconditioner& precond, Index& iters,
typename Dest::RealScalar& tol_error)
{
using std::sqrt;
using std::abs;
typedef typename Dest::RealScalar RealScalar;
typedef typename Dest::Scalar Scalar;
typedef Matrix<Scalar,Dynamic,1> VectorType;
RealScalar tol = tol_error;
Index maxIters = iters;
Index n = mat.cols();
VectorType r = rhs - mat * x;
VectorType r0 = r;
RealScalar r0_sqnorm = r0.squaredNorm();
RealScalar rhs_sqnorm = rhs.squaredNorm();
if(rhs_sqnorm == 0)
{
x.setZero();
return true;
}
Scalar rho = 1;
Scalar alpha = 1;
Scalar w = 1;
VectorType v = VectorType::Zero(n), p = VectorType::Zero(n);
VectorType y(n), z(n);
VectorType kt(n), ks(n);
VectorType s(n), t(n);
RealScalar tol2 = tol*tol*rhs_sqnorm;
RealScalar eps2 = NumTraits<Scalar>::epsilon()*NumTraits<Scalar>::epsilon();
Index i = 0;
Index restarts = 0;
while ( r.squaredNorm() > tol2 && i<maxIters )
{
Scalar rho_old = rho;
rho = r0.dot(r);
if (abs(rho) < eps2*r0_sqnorm)
{
// The new residual vector became too orthogonal to the arbitrarily chosen direction r0
// Let's restart with a new r0:
r = rhs - mat * x;
r0 = r;
rho = r0_sqnorm = r.squaredNorm();
if(restarts++ == 0)
i = 0;
}
Scalar beta = (rho/rho_old) * (alpha / w);
p = r + beta * (p - w * v);
y = precond.solve(p);
v.noalias() = mat * y;
alpha = rho / r0.dot(v);
s = r - alpha * v;
z = precond.solve(s);
t.noalias() = mat * z;
RealScalar tmp = t.squaredNorm();
if(tmp>RealScalar(0))
w = t.dot(s) / tmp;
else
w = Scalar(0);
x += alpha * y + w * z;
r = s - w * t;
++i;
}
tol_error = sqrt(r.squaredNorm()/rhs_sqnorm);
iters = i;
return true;
}
}
template< typename _MatrixType,
typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >
class BiCGSTAB;
namespace internal {
template< typename _MatrixType, typename _Preconditioner>
struct traits<BiCGSTAB<_MatrixType,_Preconditioner> >
{
typedef _MatrixType MatrixType;
typedef _Preconditioner Preconditioner;
};
}
/** \ingroup IterativeLinearSolvers_Module
* \brief A bi-conjugate gradient stabilized solver for sparse square problems
*
* This class allows one to solve A.x = b sparse linear problems using a bi-conjugate gradient
* stabilized algorithm. The vectors x and b can be either dense or sparse.
*
* \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix.
* \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
*
* \implsparsesolverconcept
*
* The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
* and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
* and NumTraits<Scalar>::epsilon() for the tolerance.
*
* The tolerance corresponds to the relative residual error: |Ax-b|/|b|
*
* \b Performance: when using sparse matrices, best performance is achieved for a row-major sparse matrix format.
* Moreover, in this case multi-threading can be exploited if the user code is compiled with OpenMP enabled.
* See \ref TopicMultiThreading for details.
*
* This class can be used in the same way as the direct solver classes. Here is a typical usage example:
* \include BiCGSTAB_simple.cpp
*
* By default the iterations start with x=0 as an initial guess of the solution.
* One can control the start using the solveWithGuess() method.
*
* BiCGSTAB can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink.
*
* \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
*/
template< typename _MatrixType, typename _Preconditioner>
class BiCGSTAB : public IterativeSolverBase<BiCGSTAB<_MatrixType,_Preconditioner> >
{
typedef IterativeSolverBase<BiCGSTAB> Base;
using Base::matrix;
using Base::m_error;
using Base::m_iterations;
using Base::m_info;
using Base::m_isInitialized;
public:
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef _Preconditioner Preconditioner;
public:
/** Default constructor. */
BiCGSTAB() : Base() {}
/** Initialize the solver with matrix \a A for further \c Ax=b solving.
*
* This constructor is a shortcut for the default constructor followed
* by a call to compute().
*
* \warning this class stores a reference to the matrix A as well as some
* precomputed values that depend on it. Therefore, if \a A is changed
* this class becomes invalid. Call compute() to update it with the new
* matrix A, or modify a copy of A.
*/
template<typename MatrixDerived>
explicit BiCGSTAB(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {}
~BiCGSTAB() {}
/** \internal */
template<typename Rhs,typename Dest>
void _solve_with_guess_impl(const Rhs& b, Dest& x) const
{
bool failed = false;
for(Index j=0; j<b.cols(); ++j)
{
m_iterations = Base::maxIterations();
m_error = Base::m_tolerance;
typename Dest::ColXpr xj(x,j);
if(!internal::bicgstab(matrix(), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error))
failed = true;
}
m_info = failed ? NumericalIssue
: m_error <= Base::m_tolerance ? Success
: NoConvergence;
m_isInitialized = true;
}
/** \internal */
using Base::_solve_impl;
template<typename Rhs,typename Dest>
void _solve_impl(const MatrixBase<Rhs>& b, Dest& x) const
{
x.resize(this->rows(),b.cols());
x.setZero();
_solve_with_guess_impl(b,x);
}
protected:
};
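// A minimal usage sketch in the spirit of the BiCGSTAB_simple.cpp example referenced
// above, assuming A is a filled square SparseMatrix<double> and b a matching vector:
//
//   Eigen::BiCGSTAB<Eigen::SparseMatrix<double> > solver; // DiagonalPreconditioner by default
//   solver.setMaxIterations(200);
//   solver.setTolerance(1e-8);
//   solver.compute(A);
//   Eigen::VectorXd x = solver.solve(b);
//   // x = solver.solveWithGuess(b, x0);                  // optional warm start from a guess x0
//   std::cout << "#iterations: " << solver.iterations()
//             << ", estimated error: " << solver.error() << std::endl;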
} // end namespace Eigen
#endif // EIGEN_BICGSTAB_H
| 7,251 | 30.668122 | 121 | h |
Crystal_Ball_Nav | Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
// Copyright (C) 2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_INCOMPLETE_LUT_H
#define EIGEN_INCOMPLETE_LUT_H
namespace Eigen {
namespace internal {
/** \internal
* Compute a quick-sort split of a vector
* On output, the vector row is permuted such that its elements satisfy
* abs(row(i)) >= abs(row(ncut)) if i<ncut
* abs(row(i)) <= abs(row(ncut)) if i>ncut
* \param row The vector of values
* \param ind The array of indices for the elements in @p row
* \param ncut The number of largest elements to keep
**/
template <typename VectorV, typename VectorI>
Index QuickSplit(VectorV &row, VectorI &ind, Index ncut)
{
typedef typename VectorV::RealScalar RealScalar;
using std::swap;
using std::abs;
Index mid;
Index n = row.size(); /* length of the vector */
Index first, last ;
ncut--; /* to fit the zero-based indices */
first = 0;
last = n-1;
if (ncut < first || ncut > last ) return 0;
do {
mid = first;
RealScalar abskey = abs(row(mid));
for (Index j = first + 1; j <= last; j++) {
if ( abs(row(j)) > abskey) {
++mid;
swap(row(mid), row(j));
swap(ind(mid), ind(j));
}
}
/* Interchange for the pivot element */
swap(row(mid), row(first));
swap(ind(mid), ind(first));
if (mid > ncut) last = mid - 1;
else if (mid < ncut ) first = mid + 1;
} while (mid != ncut );
return 0; /* mid is equal to ncut */
}
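// A small illustration with assumed values: the two entries of largest magnitude are
// moved to the front, in unspecified relative order, and ind is permuted alongside.
//
//   Eigen::VectorXd row(5); row << 1, -5, 3, 2, -4;
//   Eigen::VectorXi ind(5); ind << 0, 1, 2, 3, 4;
//   Eigen::internal::QuickSplit(row, ind, 2);
//   // now |row(0)| and |row(1)| are 5 and 4 (in either order), with matching ind entries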
}// end namespace internal
/** \ingroup IterativeLinearSolvers_Module
* \class IncompleteLUT
* \brief Incomplete LU factorization with dual-threshold strategy
*
* \implsparsesolverconcept
*
* During the numerical factorization, two dropping rules are used:
* 1) any element whose magnitude is less than some tolerance is dropped.
* This tolerance is obtained by multiplying the input tolerance @p droptol
* by the average magnitude of all the original elements in the current row.
* 2) After the elimination of the row, only the @p fill largest elements in
* the L part and the @p fill largest elements in the U part are kept
* (in addition to the diagonal element). Note that @p fill is computed from
* the input parameter @p fillfactor, which is used as the ratio to control the fill-in
* relative to the initial number of nonzero elements.
*
* The two extreme cases are when @p droptol=0 (to keep all the @p fill*2 largest elements)
* and when @p fill=n/2 with @p droptol being different from zero.
*
* References : Yousef Saad, ILUT: A dual threshold incomplete LU factorization,
* Numerical Linear Algebra with Applications, 1(4), pp 387-402, 1994.
*
* NOTE : The following implementation is derived from the ILUT implementation
* in the SPARSKIT package, Copyright (C) 2005, the Regents of the University of Minnesota
* released under the terms of the GNU LGPL:
* http://www-users.cs.umn.edu/~saad/software/SPARSKIT/README
* However, Yousef Saad gave us permission to relicense his ILUT code to MPL2.
* See the Eigen mailing list archive, thread: ILUT, date: July 8, 2012:
* http://listengine.tuxfamily.org/lists.tuxfamily.org/eigen/2012/07/msg00064.html
* alternatively, on GMANE:
* http://comments.gmane.org/gmane.comp.lib.eigen/3302
*/
template <typename _Scalar, typename _StorageIndex = int>
class IncompleteLUT : public SparseSolverBase<IncompleteLUT<_Scalar, _StorageIndex> >
{
protected:
typedef SparseSolverBase<IncompleteLUT> Base;
using Base::m_isInitialized;
public:
typedef _Scalar Scalar;
typedef _StorageIndex StorageIndex;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Matrix<Scalar,Dynamic,1> Vector;
typedef Matrix<StorageIndex,Dynamic,1> VectorI;
typedef SparseMatrix<Scalar,RowMajor,StorageIndex> FactorType;
enum {
ColsAtCompileTime = Dynamic,
MaxColsAtCompileTime = Dynamic
};
public:
IncompleteLUT()
: m_droptol(NumTraits<Scalar>::dummy_precision()), m_fillfactor(10),
m_analysisIsOk(false), m_factorizationIsOk(false)
{}
template<typename MatrixType>
explicit IncompleteLUT(const MatrixType& mat, const RealScalar& droptol=NumTraits<Scalar>::dummy_precision(), int fillfactor = 10)
: m_droptol(droptol),m_fillfactor(fillfactor),
m_analysisIsOk(false),m_factorizationIsOk(false)
{
eigen_assert(fillfactor != 0);
compute(mat);
}
Index rows() const { return m_lu.rows(); }
Index cols() const { return m_lu.cols(); }
/** \brief Reports whether previous computation was successful.
*
* \returns \c Success if computation was successful,
* \c NumericalIssue if the matrix appears to be negative.
*/
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "IncompleteLUT is not initialized.");
return m_info;
}
template<typename MatrixType>
void analyzePattern(const MatrixType& amat);
template<typename MatrixType>
void factorize(const MatrixType& amat);
/**
* Compute an incomplete LU factorization with dual threshold on the matrix mat.
* No pivoting is done in this version.
*
**/
template<typename MatrixType>
IncompleteLUT& compute(const MatrixType& amat)
{
analyzePattern(amat);
factorize(amat);
return *this;
}
void setDroptol(const RealScalar& droptol);
void setFillfactor(int fillfactor);
template<typename Rhs, typename Dest>
void _solve_impl(const Rhs& b, Dest& x) const
{
x = m_Pinv * b;
x = m_lu.template triangularView<UnitLower>().solve(x);
x = m_lu.template triangularView<Upper>().solve(x);
x = m_P * x;
}
protected:
/** keeps off-diagonal entries; drops diagonal entries */
struct keep_diag {
inline bool operator() (const Index& row, const Index& col, const Scalar&) const
{
return row!=col;
}
};
protected:
FactorType m_lu;
RealScalar m_droptol;
int m_fillfactor;
bool m_analysisIsOk;
bool m_factorizationIsOk;
ComputationInfo m_info;
PermutationMatrix<Dynamic,Dynamic,StorageIndex> m_P; // Fill-reducing permutation
PermutationMatrix<Dynamic,Dynamic,StorageIndex> m_Pinv; // Inverse permutation
};
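// A minimal usage sketch: IncompleteLUT is typically plugged into an iterative solver
// as its preconditioner. A and b are assumed to be a filled square SparseMatrix<double>
// and a matching right-hand side:
//
//   Eigen::BiCGSTAB<Eigen::SparseMatrix<double>,
//                   Eigen::IncompleteLUT<double> > solver;
//   solver.preconditioner().setDroptol(1e-5);  // dropping tolerance
//   solver.preconditioner().setFillfactor(20); // fill ratio per row
//   solver.compute(A);
//   Eigen::VectorXd x = solver.solve(b);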
/**
* Set control parameter droptol
* \param droptol Drop any element whose magnitude is less than this tolerance
**/
template<typename Scalar, typename StorageIndex>
void IncompleteLUT<Scalar,StorageIndex>::setDroptol(const RealScalar& droptol)
{
this->m_droptol = droptol;
}
/**
* Set control parameter fillfactor
* \param fillfactor This is used to compute the number @p fill_in of largest elements to keep on each row.
**/
template<typename Scalar, typename StorageIndex>
void IncompleteLUT<Scalar,StorageIndex>::setFillfactor(int fillfactor)
{
this->m_fillfactor = fillfactor;
}
template <typename Scalar, typename StorageIndex>
template<typename _MatrixType>
void IncompleteLUT<Scalar,StorageIndex>::analyzePattern(const _MatrixType& amat)
{
// Compute the Fill-reducing permutation
// Since ILUT does not perform any numerical pivoting,
// it is highly preferable to keep the diagonal through symmetric permutations.
#ifndef EIGEN_MPL2_ONLY
// To this end, let's symmetrize the pattern and perform AMD on it.
SparseMatrix<Scalar,ColMajor, StorageIndex> mat1 = amat;
SparseMatrix<Scalar,ColMajor, StorageIndex> mat2 = amat.transpose();
// FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice.
// on the other hand, for a really non-symmetric pattern, mat2*mat1 should be preferred...
SparseMatrix<Scalar,ColMajor, StorageIndex> AtA = mat2 + mat1;
AMDOrdering<StorageIndex> ordering;
ordering(AtA,m_P);
m_Pinv = m_P.inverse(); // cache the inverse permutation
#else
// If AMD is not available, (MPL2-only), then let's use the slower COLAMD routine.
SparseMatrix<Scalar,ColMajor, StorageIndex> mat1 = amat;
COLAMDOrdering<StorageIndex> ordering;
ordering(mat1,m_Pinv);
m_P = m_Pinv.inverse();
#endif
m_analysisIsOk = true;
m_factorizationIsOk = false;
m_isInitialized = true;
}
template <typename Scalar, typename StorageIndex>
template<typename _MatrixType>
void IncompleteLUT<Scalar,StorageIndex>::factorize(const _MatrixType& amat)
{
using std::sqrt;
using std::swap;
using std::abs;
using internal::convert_index;
eigen_assert((amat.rows() == amat.cols()) && "The factorization should be done on a square matrix");
Index n = amat.cols(); // Size of the matrix
m_lu.resize(n,n);
// Declare Working vectors and variables
Vector u(n) ; // real values of the row -- maximum size is n --
VectorI ju(n); // column position of the values in u -- maximum size is n
VectorI jr(n); // Indicate the position of the nonzero elements in the vector u -- A zero location is indicated by -1
// Apply the fill-reducing permutation
eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
SparseMatrix<Scalar,RowMajor, StorageIndex> mat;
mat = amat.twistedBy(m_Pinv);
// Initialization
jr.fill(-1);
ju.fill(0);
u.fill(0);
// number of largest elements to keep in each row:
Index fill_in = (amat.nonZeros()*m_fillfactor)/n + 1;
if (fill_in > n) fill_in = n;
// number of largest nonzero elements to keep in the L and the U part of the current row:
Index nnzL = fill_in/2;
Index nnzU = nnzL;
m_lu.reserve(n * (nnzL + nnzU + 1));
// global loop over the rows of the sparse matrix
for (Index ii = 0; ii < n; ii++)
{
// 1 - copy the lower and the upper part of the row i of mat in the working vector u
Index sizeu = 1; // number of nonzero elements in the upper part of the current row
Index sizel = 0; // number of nonzero elements in the lower part of the current row
ju(ii) = convert_index<StorageIndex>(ii);
u(ii) = 0;
jr(ii) = convert_index<StorageIndex>(ii);
RealScalar rownorm = 0;
typename FactorType::InnerIterator j_it(mat, ii); // Iterate through the current row ii
for (; j_it; ++j_it)
{
Index k = j_it.index();
if (k < ii)
{
// copy the lower part
ju(sizel) = convert_index<StorageIndex>(k);
u(sizel) = j_it.value();
jr(k) = convert_index<StorageIndex>(sizel);
++sizel;
}
else if (k == ii)
{
u(ii) = j_it.value();
}
else
{
// copy the upper part
Index jpos = ii + sizeu;
ju(jpos) = convert_index<StorageIndex>(k);
u(jpos) = j_it.value();
jr(k) = convert_index<StorageIndex>(jpos);
++sizeu;
}
rownorm += numext::abs2(j_it.value());
}
// 2 - detect possible zero row
if(rownorm==0)
{
m_info = NumericalIssue;
return;
}
// Take the 2-norm of the current row as a relative tolerance
rownorm = sqrt(rownorm);
// 3 - eliminate the previous nonzero rows
Index jj = 0;
Index len = 0;
while (jj < sizel)
{
// In order to eliminate in the correct order,
// we must select first the smallest column index among ju(jj:sizel)
Index k;
Index minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment
k += jj;
if (minrow != ju(jj))
{
// swap the two locations
Index j = ju(jj);
swap(ju(jj), ju(k));
jr(minrow) = convert_index<StorageIndex>(jj);
jr(j) = convert_index<StorageIndex>(k);
swap(u(jj), u(k));
}
// Reset this location
jr(minrow) = -1;
// Start elimination
typename FactorType::InnerIterator ki_it(m_lu, minrow);
while (ki_it && ki_it.index() < minrow) ++ki_it;
eigen_internal_assert(ki_it && ki_it.col()==minrow);
Scalar fact = u(jj) / ki_it.value();
// drop too small elements
if(abs(fact) <= m_droptol)
{
jj++;
continue;
}
// linear combination of the current row ii and the row minrow
++ki_it;
for (; ki_it; ++ki_it)
{
Scalar prod = fact * ki_it.value();
Index j = ki_it.index();
Index jpos = jr(j);
if (jpos == -1) // fill-in element
{
Index newpos;
if (j >= ii) // dealing with the upper part
{
newpos = ii + sizeu;
sizeu++;
eigen_internal_assert(sizeu<=n);
}
else // dealing with the lower part
{
newpos = sizel;
sizel++;
eigen_internal_assert(sizel<=ii);
}
ju(newpos) = convert_index<StorageIndex>(j);
u(newpos) = -prod;
jr(j) = convert_index<StorageIndex>(newpos);
}
else
u(jpos) -= prod;
}
// store the pivot element
u(len) = fact;
ju(len) = convert_index<StorageIndex>(minrow);
++len;
jj++;
} // end of the elimination on the row ii
// reset the upper part of the pointer jr to zero
for(Index k = 0; k <sizeu; k++) jr(ju(ii+k)) = -1;
// 4 - partially sort and insert the elements in the m_lu matrix
// sort the L-part of the row
sizel = len;
len = (std::min)(sizel, nnzL);
typename Vector::SegmentReturnType ul(u.segment(0, sizel));
typename VectorI::SegmentReturnType jul(ju.segment(0, sizel));
internal::QuickSplit(ul, jul, len);
// store the largest m_fill elements of the L part
m_lu.startVec(ii);
for(Index k = 0; k < len; k++)
m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k);
// store the diagonal element
// apply a shifting rule to avoid zero pivots (we are doing an incomplete factorization)
if (u(ii) == Scalar(0))
u(ii) = sqrt(m_droptol) * rownorm;
m_lu.insertBackByOuterInnerUnordered(ii, ii) = u(ii);
// sort the U-part of the row
// apply the dropping rule first
len = 0;
for(Index k = 1; k < sizeu; k++)
{
if(abs(u(ii+k)) > m_droptol * rownorm )
{
++len;
u(ii + len) = u(ii + k);
ju(ii + len) = ju(ii + k);
}
}
sizeu = len + 1; // +1 to take into account the diagonal element
len = (std::min)(sizeu, nnzU);
typename Vector::SegmentReturnType uu(u.segment(ii+1, sizeu-1));
typename VectorI::SegmentReturnType juu(ju.segment(ii+1, sizeu-1));
internal::QuickSplit(uu, juu, len);
// store the largest elements of the U part
for(Index k = ii + 1; k < ii + len; k++)
m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k);
}
m_lu.finalize();
m_lu.makeCompressed();
m_factorizationIsOk = true;
m_info = Success;
}
} // end namespace Eigen
#endif // EIGEN_INCOMPLETE_LUT_H
| 15,232 | 31.900648 | 134 | h |
Crystal_Ball_Nav | Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_ITERATIVE_SOLVER_BASE_H
#define EIGEN_ITERATIVE_SOLVER_BASE_H
namespace Eigen {
namespace internal {
template<typename MatrixType>
struct is_ref_compatible_impl
{
private:
template <typename T0>
struct any_conversion
{
template <typename T> any_conversion(const volatile T&);
template <typename T> any_conversion(T&);
};
struct yes {int a[1];};
struct no {int a[2];};
template<typename T>
static yes test(const Ref<const T>&, int);
template<typename T>
static no test(any_conversion<T>, ...);
public:
static MatrixType ms_from;
enum { value = sizeof(test<MatrixType>(ms_from, 0))==sizeof(yes) };
};
template<typename MatrixType>
struct is_ref_compatible
{
enum { value = is_ref_compatible_impl<typename remove_all<MatrixType>::type>::value };
};
template<typename MatrixType, bool MatrixFree = !internal::is_ref_compatible<MatrixType>::value>
class generic_matrix_wrapper;
// We have an explicit matrix at hand, compatible with Ref<>
template<typename MatrixType>
class generic_matrix_wrapper<MatrixType,false>
{
public:
typedef Ref<const MatrixType> ActualMatrixType;
template<int UpLo> struct ConstSelfAdjointViewReturnType {
typedef typename ActualMatrixType::template ConstSelfAdjointViewReturnType<UpLo>::Type Type;
};
enum {
MatrixFree = false
};
generic_matrix_wrapper()
: m_dummy(0,0), m_matrix(m_dummy)
{}
template<typename InputType>
generic_matrix_wrapper(const InputType &mat)
: m_matrix(mat)
{}
const ActualMatrixType& matrix() const
{
return m_matrix;
}
template<typename MatrixDerived>
void grab(const EigenBase<MatrixDerived> &mat)
{
m_matrix.~Ref<const MatrixType>();
::new (&m_matrix) Ref<const MatrixType>(mat.derived());
}
void grab(const Ref<const MatrixType> &mat)
{
if(&(mat.derived()) != &m_matrix)
{
m_matrix.~Ref<const MatrixType>();
::new (&m_matrix) Ref<const MatrixType>(mat);
}
}
protected:
MatrixType m_dummy; // used to default initialize the Ref<> object
ActualMatrixType m_matrix;
};
// MatrixType is not compatible with Ref<> -> matrix-free wrapper
template<typename MatrixType>
class generic_matrix_wrapper<MatrixType,true>
{
public:
typedef MatrixType ActualMatrixType;
template<int UpLo> struct ConstSelfAdjointViewReturnType
{
typedef ActualMatrixType Type;
};
enum {
MatrixFree = true
};
generic_matrix_wrapper()
: mp_matrix(0)
{}
generic_matrix_wrapper(const MatrixType &mat)
: mp_matrix(&mat)
{}
const ActualMatrixType& matrix() const
{
return *mp_matrix;
}
void grab(const MatrixType &mat)
{
mp_matrix = &mat;
}
protected:
const ActualMatrixType *mp_matrix;
};
}
/** \ingroup IterativeLinearSolvers_Module
* \brief Base class for linear iterative solvers
*
* \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
*/
template< typename Derived>
class IterativeSolverBase : public SparseSolverBase<Derived>
{
protected:
typedef SparseSolverBase<Derived> Base;
using Base::m_isInitialized;
public:
typedef typename internal::traits<Derived>::MatrixType MatrixType;
typedef typename internal::traits<Derived>::Preconditioner Preconditioner;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef typename MatrixType::RealScalar RealScalar;
enum {
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
public:
using Base::derived;
/** Default constructor. */
IterativeSolverBase()
{
init();
}
/** Initialize the solver with matrix \a A for further \c Ax=b solving.
*
* This constructor is a shortcut for the default constructor followed
* by a call to compute().
*
* \warning this class stores a reference to the matrix A as well as some
* precomputed values that depend on it. Therefore, if \a A is changed
* this class becomes invalid. Call compute() to update it with the new
* matrix A, or modify a copy of A.
*/
template<typename MatrixDerived>
explicit IterativeSolverBase(const EigenBase<MatrixDerived>& A)
: m_matrixWrapper(A.derived())
{
init();
compute(matrix());
}
~IterativeSolverBase() {}
/** Initializes the iterative solver for the sparsity pattern of the matrix \a A for further solving \c Ax=b problems.
*
* Currently, this function mostly calls analyzePattern on the preconditioner. In the future
* we might, for instance, implement column reordering for faster matrix vector products.
*/
template<typename MatrixDerived>
Derived& analyzePattern(const EigenBase<MatrixDerived>& A)
{
grab(A.derived());
m_preconditioner.analyzePattern(matrix());
m_isInitialized = true;
m_analysisIsOk = true;
m_info = m_preconditioner.info();
return derived();
}
/** Initializes the iterative solver with the numerical values of the matrix \a A for further solving \c Ax=b problems.
*
* Currently, this function mostly calls factorize on the preconditioner.
*
* \warning this class stores a reference to the matrix A as well as some
* precomputed values that depend on it. Therefore, if \a A is changed
* this class becomes invalid. Call compute() to update it with the new
* matrix A, or modify a copy of A.
*/
template<typename MatrixDerived>
Derived& factorize(const EigenBase<MatrixDerived>& A)
{
eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
grab(A.derived());
m_preconditioner.factorize(matrix());
m_factorizationIsOk = true;
m_info = m_preconditioner.info();
return derived();
}
/** Initializes the iterative solver with the matrix \a A for further solving \c Ax=b problems.
*
* Currently, this function mostly initializes/computes the preconditioner. In the future
* we might, for instance, implement column reordering for faster matrix vector products.
*
* \warning this class stores a reference to the matrix A as well as some
* precomputed values that depend on it. Therefore, if \a A is changed
* this class becomes invalid. Call compute() to update it with the new
* matrix A, or modify a copy of A.
*/
template<typename MatrixDerived>
Derived& compute(const EigenBase<MatrixDerived>& A)
{
grab(A.derived());
m_preconditioner.compute(matrix());
m_isInitialized = true;
m_analysisIsOk = true;
m_factorizationIsOk = true;
m_info = m_preconditioner.info();
return derived();
}
/** \internal */
Index rows() const { return matrix().rows(); }
/** \internal */
Index cols() const { return matrix().cols(); }
/** \returns the tolerance threshold used by the stopping criteria.
* \sa setTolerance()
*/
RealScalar tolerance() const { return m_tolerance; }
/** Sets the tolerance threshold used by the stopping criteria.
*
* This value is used as an upper bound to the relative residual error: |Ax-b|/|b|.
* The default value is the machine precision given by NumTraits<Scalar>::epsilon()
*/
Derived& setTolerance(const RealScalar& tolerance)
{
m_tolerance = tolerance;
return derived();
}
/** \returns a read-write reference to the preconditioner for custom configuration. */
Preconditioner& preconditioner() { return m_preconditioner; }
/** \returns a read-only reference to the preconditioner. */
const Preconditioner& preconditioner() const { return m_preconditioner; }
/** \returns the max number of iterations.
* It is either the value set by setMaxIterations or, by default,
* twice the number of columns of the matrix.
*/
Index maxIterations() const
{
return (m_maxIterations<0) ? 2*matrix().cols() : m_maxIterations;
}
/** Sets the max number of iterations.
* Default is twice the number of columns of the matrix.
*/
Derived& setMaxIterations(Index maxIters)
{
m_maxIterations = maxIters;
return derived();
}
/** \returns the number of iterations performed during the last solve */
Index iterations() const
{
eigen_assert(m_isInitialized && "ConjugateGradient is not initialized.");
return m_iterations;
}
/** \returns the tolerance error reached during the last solve.
* It is a close approximation of the true relative residual error |Ax-b|/|b|.
*/
RealScalar error() const
{
eigen_assert(m_isInitialized && "ConjugateGradient is not initialized.");
return m_error;
}
/** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
* and \a x0 as an initial solution.
*
* \sa solve(), compute()
*/
template<typename Rhs,typename Guess>
inline const SolveWithGuess<Derived, Rhs, Guess>
solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
{
eigen_assert(m_isInitialized && "Solver is not initialized.");
eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b");
return SolveWithGuess<Derived, Rhs, Guess>(derived(), b.derived(), x0);
}
/** \returns Success if the iterations converged, and NoConvergence otherwise. */
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized.");
return m_info;
}
/** \internal */
template<typename Rhs, typename DestDerived>
void _solve_impl(const Rhs& b, SparseMatrixBase<DestDerived> &aDest) const
{
eigen_assert(rows()==b.rows());
Index rhsCols = b.cols();
Index size = b.rows();
DestDerived& dest(aDest.derived());
typedef typename DestDerived::Scalar DestScalar;
Eigen::Matrix<DestScalar,Dynamic,1> tb(size);
Eigen::Matrix<DestScalar,Dynamic,1> tx(cols());
// We do not directly fill dest because sparse expressions have to be free of aliasing issues.
// For non-square least-square problems, b and dest might not have the same size, yet they might alias each other.
typename DestDerived::PlainObject tmp(cols(),rhsCols);
for(Index k=0; k<rhsCols; ++k)
{
tb = b.col(k);
tx = derived().solve(tb);
tmp.col(k) = tx.sparseView(0);
}
dest.swap(tmp);
}
protected:
void init()
{
m_isInitialized = false;
m_analysisIsOk = false;
m_factorizationIsOk = false;
m_maxIterations = -1;
m_tolerance = NumTraits<Scalar>::epsilon();
}
typedef internal::generic_matrix_wrapper<MatrixType> MatrixWrapper;
typedef typename MatrixWrapper::ActualMatrixType ActualMatrixType;
const ActualMatrixType& matrix() const
{
return m_matrixWrapper.matrix();
}
template<typename InputType>
void grab(const InputType &A)
{
m_matrixWrapper.grab(A);
}
MatrixWrapper m_matrixWrapper;
Preconditioner m_preconditioner;
Index m_maxIterations;
RealScalar m_tolerance;
mutable RealScalar m_error;
mutable Index m_iterations;
mutable ComputationInfo m_info;
mutable bool m_analysisIsOk, m_factorizationIsOk;
};
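// A minimal configuration sketch: the accessors above are inherited by every concrete
// solver (ConjugateGradient, BiCGSTAB, ...). A, b and x0 are assumed to be a filled
// square SparseMatrix<double>, a right-hand side and an initial guess, respectively:
//
//   Eigen::BiCGSTAB<Eigen::SparseMatrix<double> > solver(A);
//   solver.setTolerance(1e-10);      // upper bound on the relative residual |Ax-b|/|b|
//   solver.setMaxIterations(500);    // default would be 2 * A.cols()
//   Eigen::VectorXd x = solver.solveWithGuess(b, x0);
//   if (solver.info() != Eigen::Success) { /* NoConvergence or NumericalIssue */ }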
} // end namespace Eigen
#endif // EIGEN_ITERATIVE_SOLVER_BASE_H
| 11,527 | 28.18481 | 121 | h |
Crystal_Ball_Nav | Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_LEAST_SQUARE_CONJUGATE_GRADIENT_H
#define EIGEN_LEAST_SQUARE_CONJUGATE_GRADIENT_H
namespace Eigen {
namespace internal {
/** \internal Low-level conjugate gradient algorithm for least-square problems
* \param mat The matrix A
* \param rhs The right hand side vector b
* \param x On input an initial solution, on output the computed solution.
* \param precond A preconditioner being able to efficiently solve for an
* approximation of A'Ax=b (regardless of b)
* \param iters On input the max number of iterations, on output the number of performed iterations.
* \param tol_error On input the tolerance error, on output an estimation of the relative error.
*/
template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
EIGEN_DONT_INLINE
void least_square_conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,
const Preconditioner& precond, Index& iters,
typename Dest::RealScalar& tol_error)
{
using std::sqrt;
using std::abs;
typedef typename Dest::RealScalar RealScalar;
typedef typename Dest::Scalar Scalar;
typedef Matrix<Scalar,Dynamic,1> VectorType;
RealScalar tol = tol_error;
Index maxIters = iters;
Index m = mat.rows(), n = mat.cols();
VectorType residual = rhs - mat * x;
VectorType normal_residual = mat.adjoint() * residual;
RealScalar rhsNorm2 = (mat.adjoint()*rhs).squaredNorm();
if(rhsNorm2 == 0)
{
x.setZero();
iters = 0;
tol_error = 0;
return;
}
RealScalar threshold = tol*tol*rhsNorm2;
RealScalar residualNorm2 = normal_residual.squaredNorm();
if (residualNorm2 < threshold)
{
iters = 0;
tol_error = sqrt(residualNorm2 / rhsNorm2);
return;
}
VectorType p(n);
p = precond.solve(normal_residual); // initial search direction
VectorType z(n), tmp(m);
RealScalar absNew = numext::real(normal_residual.dot(p)); // the square of the absolute value of r scaled by invM
Index i = 0;
while(i < maxIters)
{
tmp.noalias() = mat * p;
Scalar alpha = absNew / tmp.squaredNorm(); // the amount we travel on dir
x += alpha * p; // update solution
residual -= alpha * tmp; // update residual
normal_residual = mat.adjoint() * residual; // update residual of the normal equation
residualNorm2 = normal_residual.squaredNorm();
if(residualNorm2 < threshold)
break;
z = precond.solve(normal_residual); // approximately solve for "A'A z = normal_residual"
RealScalar absOld = absNew;
absNew = numext::real(normal_residual.dot(z)); // update the absolute value of r
RealScalar beta = absNew / absOld; // calculate the Gram-Schmidt value used to create the new search direction
p = z + beta * p; // update search direction
i++;
}
tol_error = sqrt(residualNorm2 / rhsNorm2);
iters = i;
}
}
template< typename _MatrixType,
typename _Preconditioner = LeastSquareDiagonalPreconditioner<typename _MatrixType::Scalar> >
class LeastSquaresConjugateGradient;
namespace internal {
template< typename _MatrixType, typename _Preconditioner>
struct traits<LeastSquaresConjugateGradient<_MatrixType,_Preconditioner> >
{
typedef _MatrixType MatrixType;
typedef _Preconditioner Preconditioner;
};
}
/** \ingroup IterativeLinearSolvers_Module
* \brief A conjugate gradient solver for sparse (or dense) least-square problems
*
* This class allows one to solve A x = b linear problems using an iterative conjugate gradient algorithm.
* The matrix A can be non-symmetric and rectangular, but the matrix A' A should be positive definite to guarantee stability.
* Otherwise, the SparseLU or SparseQR classes might be preferable.
* The matrix A and the vectors x and b can be either dense or sparse.
*
* \tparam _MatrixType the type of the matrix A, can be a dense or a sparse matrix.
* \tparam _Preconditioner the type of the preconditioner. Default is LeastSquareDiagonalPreconditioner
*
* \implsparsesolverconcept
*
* The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
* and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
* and NumTraits<Scalar>::epsilon() for the tolerance.
*
* This class can be used in the same way as the direct solver classes. Here is a typical usage example:
\code
int m=1000000, n = 10000;
VectorXd x(n), b(m);
SparseMatrix<double> A(m,n);
// fill A and b
LeastSquaresConjugateGradient<SparseMatrix<double> > lscg;
lscg.compute(A);
x = lscg.solve(b);
std::cout << "#iterations: " << lscg.iterations() << std::endl;
std::cout << "estimated error: " << lscg.error() << std::endl;
// update b, and solve again
x = lscg.solve(b);
\endcode
*
* By default the iterations start with x=0 as an initial guess of the solution.
* One can control the start using the solveWithGuess() method.
*
* \sa class ConjugateGradient, SparseLU, SparseQR
*/
template< typename _MatrixType, typename _Preconditioner>
class LeastSquaresConjugateGradient : public IterativeSolverBase<LeastSquaresConjugateGradient<_MatrixType,_Preconditioner> >
{
typedef IterativeSolverBase<LeastSquaresConjugateGradient> Base;
using Base::matrix;
using Base::m_error;
using Base::m_iterations;
using Base::m_info;
using Base::m_isInitialized;
public:
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef _Preconditioner Preconditioner;
public:
/** Default constructor. */
LeastSquaresConjugateGradient() : Base() {}
/** Initialize the solver with matrix \a A for further \c Ax=b solving.
*
* This constructor is a shortcut for the default constructor followed
* by a call to compute().
*
* \warning this class stores a reference to the matrix A as well as some
* precomputed values that depend on it. Therefore, if \a A is changed
* this class becomes invalid. Call compute() to update it with the new
* matrix A, or modify a copy of A.
*/
template<typename MatrixDerived>
explicit LeastSquaresConjugateGradient(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {}
~LeastSquaresConjugateGradient() {}
/** \internal */
template<typename Rhs,typename Dest>
void _solve_with_guess_impl(const Rhs& b, Dest& x) const
{
m_iterations = Base::maxIterations();
m_error = Base::m_tolerance;
for(Index j=0; j<b.cols(); ++j)
{
m_iterations = Base::maxIterations();
m_error = Base::m_tolerance;
typename Dest::ColXpr xj(x,j);
internal::least_square_conjugate_gradient(matrix(), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error);
}
m_isInitialized = true;
m_info = m_error <= Base::m_tolerance ? Success : NoConvergence;
}
/** \internal */
using Base::_solve_impl;
template<typename Rhs,typename Dest>
void _solve_impl(const MatrixBase<Rhs>& b, Dest& x) const
{
x.setZero();
_solve_with_guess_impl(b.derived(),x);
}
};
} // end namespace Eigen
#endif // EIGEN_LEAST_SQUARE_CONJUGATE_GRADIENT_H
| 7,762 | 34.774194 | 127 | h |
Crystal_Ball_Nav | Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/LU/Determinant.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_DETERMINANT_H
#define EIGEN_DETERMINANT_H
namespace Eigen {
namespace internal {
template<typename Derived>
inline const typename Derived::Scalar bruteforce_det3_helper
(const MatrixBase<Derived>& matrix, int a, int b, int c)
{
return matrix.coeff(0,a)
* (matrix.coeff(1,b) * matrix.coeff(2,c) - matrix.coeff(1,c) * matrix.coeff(2,b));
}
template<typename Derived>
const typename Derived::Scalar bruteforce_det4_helper
(const MatrixBase<Derived>& matrix, int j, int k, int m, int n)
{
return (matrix.coeff(j,0) * matrix.coeff(k,1) - matrix.coeff(k,0) * matrix.coeff(j,1))
* (matrix.coeff(m,2) * matrix.coeff(n,3) - matrix.coeff(n,2) * matrix.coeff(m,3));
}
template<typename Derived,
int DeterminantType = Derived::RowsAtCompileTime
> struct determinant_impl
{
static inline typename traits<Derived>::Scalar run(const Derived& m)
{
if(Derived::ColsAtCompileTime==Dynamic && m.rows()==0)
return typename traits<Derived>::Scalar(1);
return m.partialPivLu().determinant();
}
};
template<typename Derived> struct determinant_impl<Derived, 1>
{
static inline typename traits<Derived>::Scalar run(const Derived& m)
{
return m.coeff(0,0);
}
};
template<typename Derived> struct determinant_impl<Derived, 2>
{
static inline typename traits<Derived>::Scalar run(const Derived& m)
{
return m.coeff(0,0) * m.coeff(1,1) - m.coeff(1,0) * m.coeff(0,1);
}
};
template<typename Derived> struct determinant_impl<Derived, 3>
{
static inline typename traits<Derived>::Scalar run(const Derived& m)
{
return bruteforce_det3_helper(m,0,1,2)
- bruteforce_det3_helper(m,1,0,2)
+ bruteforce_det3_helper(m,2,0,1);
}
};
template<typename Derived> struct determinant_impl<Derived, 4>
{
static typename traits<Derived>::Scalar run(const Derived& m)
{
// trick by Martin Costabel to compute 4x4 det with only 30 muls
return bruteforce_det4_helper(m,0,1,2,3)
- bruteforce_det4_helper(m,0,2,1,3)
+ bruteforce_det4_helper(m,0,3,1,2)
+ bruteforce_det4_helper(m,1,2,0,3)
- bruteforce_det4_helper(m,1,3,0,2)
+ bruteforce_det4_helper(m,2,3,0,1);
}
};
} // end namespace internal
/** \lu_module
*
* \returns the determinant of this matrix
*/
template<typename Derived>
inline typename internal::traits<Derived>::Scalar MatrixBase<Derived>::determinant() const
{
eigen_assert(rows() == cols());
typedef typename internal::nested_eval<Derived,Base::RowsAtCompileTime>::type Nested;
return internal::determinant_impl<typename internal::remove_all<Nested>::type>::run(derived());
}
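// A small worked sketch: determinant() dispatches on the compile-time size, using the
// closed-form expressions above for sizes 1 to 4 and partial-pivoting LU otherwise.
//
//   Eigen::Matrix2d m;
//   m << 3, 1,
//        2, 4;
//   double d = m.determinant(); // 3*4 - 2*1 = 10, via determinant_impl<Derived,2>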
} // end namespace Eigen
#endif // EIGEN_DETERMINANT_H
| 3,057 | 28.980392 | 97 | h |
Crystal_Ball_Nav | Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/QR/HouseholderQR_LAPACKE.h |
/*
Copyright (c) 2011, Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************************
* Content : Eigen bindings to LAPACKe
* Householder QR decomposition of a matrix w/o pivoting based on
* LAPACKE_?geqrf function.
********************************************************************************
*/
#ifndef EIGEN_QR_LAPACKE_H
#define EIGEN_QR_LAPACKE_H
namespace Eigen {
namespace internal {
/** \internal Specialization for the data types supported by LAPACKe */
#define EIGEN_LAPACKE_QR_NOPIV(EIGTYPE, LAPACKE_TYPE, LAPACKE_PREFIX) \
template<typename MatrixQR, typename HCoeffs> \
struct householder_qr_inplace_blocked<MatrixQR, HCoeffs, EIGTYPE, true> \
{ \
static void run(MatrixQR& mat, HCoeffs& hCoeffs, Index = 32, \
typename MatrixQR::Scalar* = 0) \
{ \
lapack_int m = (lapack_int) mat.rows(); \
lapack_int n = (lapack_int) mat.cols(); \
lapack_int lda = (lapack_int) mat.outerStride(); \
lapack_int matrix_order = (MatrixQR::IsRowMajor) ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \
LAPACKE_##LAPACKE_PREFIX##geqrf( matrix_order, m, n, (LAPACKE_TYPE*)mat.data(), lda, (LAPACKE_TYPE*)hCoeffs.data()); \
hCoeffs.adjointInPlace(); \
} \
};
EIGEN_LAPACKE_QR_NOPIV(double, double, d)
EIGEN_LAPACKE_QR_NOPIV(float, float, s)
EIGEN_LAPACKE_QR_NOPIV(dcomplex, lapack_complex_double, z)
EIGEN_LAPACKE_QR_NOPIV(scomplex, lapack_complex_float, c)
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_QR_LAPACKE_H
| 2,993 | 42.391304 | 122 | h |
Crystal_Ball_Nav | Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/SVD/UpperBidiagonalization.h |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Benoit Jacob <[email protected]>
// Copyright (C) 2013-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_BIDIAGONALIZATION_H
#define EIGEN_BIDIAGONALIZATION_H
namespace Eigen {
namespace internal {
// UpperBidiagonalization will probably be replaced by a Bidiagonalization class, don't want to make it stable API.
// At the same time, it's useful to keep for now as it's about the only thing that is testing the BandMatrix class.
template<typename _MatrixType> class UpperBidiagonalization
{
public:
typedef _MatrixType MatrixType;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
ColsAtCompileTimeMinusOne = internal::decrement_size<ColsAtCompileTime>::ret
};
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
typedef Matrix<Scalar, 1, ColsAtCompileTime> RowVectorType;
typedef Matrix<Scalar, RowsAtCompileTime, 1> ColVectorType;
typedef BandMatrix<RealScalar, ColsAtCompileTime, ColsAtCompileTime, 1, 0, RowMajor> BidiagonalType;
typedef Matrix<Scalar, ColsAtCompileTime, 1> DiagVectorType;
typedef Matrix<Scalar, ColsAtCompileTimeMinusOne, 1> SuperDiagVectorType;
typedef HouseholderSequence<
const MatrixType,
const typename internal::remove_all<typename Diagonal<const MatrixType,0>::ConjugateReturnType>::type
> HouseholderUSequenceType;
typedef HouseholderSequence<
const typename internal::remove_all<typename MatrixType::ConjugateReturnType>::type,
Diagonal<const MatrixType,1>,
OnTheRight
> HouseholderVSequenceType;
/**
* \brief Default Constructor.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via Bidiagonalization::compute(const MatrixType&).
*/
UpperBidiagonalization() : m_householder(), m_bidiagonal(), m_isInitialized(false) {}
explicit UpperBidiagonalization(const MatrixType& matrix)
: m_householder(matrix.rows(), matrix.cols()),
m_bidiagonal(matrix.cols(), matrix.cols()),
m_isInitialized(false)
{
compute(matrix);
}
UpperBidiagonalization& compute(const MatrixType& matrix);
UpperBidiagonalization& computeUnblocked(const MatrixType& matrix);
const MatrixType& householder() const { return m_householder; }
const BidiagonalType& bidiagonal() const { return m_bidiagonal; }
const HouseholderUSequenceType householderU() const
{
eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized.");
return HouseholderUSequenceType(m_householder, m_householder.diagonal().conjugate());
}
const HouseholderVSequenceType householderV() // const here gives nasty errors and i'm lazy
{
eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized.");
return HouseholderVSequenceType(m_householder.conjugate(), m_householder.const_derived().template diagonal<1>())
.setLength(m_householder.cols()-1)
.setShift(1);
}
protected:
MatrixType m_householder;
BidiagonalType m_bidiagonal;
bool m_isInitialized;
};
// Standard upper bidiagonalization without fancy optimizations
// This version should be faster for small matrix size
template<typename MatrixType>
void upperbidiagonalization_inplace_unblocked(MatrixType& mat,
typename MatrixType::RealScalar *diagonal,
typename MatrixType::RealScalar *upper_diagonal,
typename MatrixType::Scalar* tempData = 0)
{
typedef typename MatrixType::Scalar Scalar;
Index rows = mat.rows();
Index cols = mat.cols();
typedef Matrix<Scalar,Dynamic,1,ColMajor,MatrixType::MaxRowsAtCompileTime,1> TempType;
TempType tempVector;
if(tempData==0)
{
tempVector.resize(rows);
tempData = tempVector.data();
}
for (Index k = 0; /* breaks at k==cols-1 below */ ; ++k)
{
Index remainingRows = rows - k;
Index remainingCols = cols - k - 1;
// construct left householder transform in-place in A
mat.col(k).tail(remainingRows)
.makeHouseholderInPlace(mat.coeffRef(k,k), diagonal[k]);
// apply householder transform to remaining part of A on the left
mat.bottomRightCorner(remainingRows, remainingCols)
.applyHouseholderOnTheLeft(mat.col(k).tail(remainingRows-1), mat.coeff(k,k), tempData);
if(k == cols-1) break;
// construct right householder transform in-place in mat
mat.row(k).tail(remainingCols)
.makeHouseholderInPlace(mat.coeffRef(k,k+1), upper_diagonal[k]);
// apply householder transform to remaining part of mat on the left
mat.bottomRightCorner(remainingRows-1, remainingCols)
.applyHouseholderOnTheRight(mat.row(k).tail(remainingCols-1).transpose(), mat.coeff(k,k+1), tempData);
}
}
/** \internal
* Helper routine for the block reduction to upper bidiagonal form.
*
* Let's partition the matrix A:
*
* | A00 A01 |
* A = | |
* | A10 A11 |
*
* This function reduces to bidiagonal form the left \c rows x \a blockSize vertical panel [A00/A10]
* and the \a blockSize x \c cols horizontal panel [A00 A01] of the matrix \a A. The bottom-right block A11
* is updated using matrix-matrix products:
* A11 -= V * Y^T - X * U^T
* where V and U contain the left and right Householder vectors. V and U are stored in A10 and A01,
* respectively, and the update matrices X and Y are computed during the reduction.
*
*/
template<typename MatrixType>
void upperbidiagonalization_blocked_helper(MatrixType& A,
typename MatrixType::RealScalar *diagonal,
typename MatrixType::RealScalar *upper_diagonal,
Index bs,
Ref<Matrix<typename MatrixType::Scalar, Dynamic, Dynamic,
traits<MatrixType>::Flags & RowMajorBit> > X,
Ref<Matrix<typename MatrixType::Scalar, Dynamic, Dynamic,
traits<MatrixType>::Flags & RowMajorBit> > Y)
{
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename NumTraits<RealScalar>::Literal Literal;
enum { StorageOrder = traits<MatrixType>::Flags & RowMajorBit };
typedef InnerStride<int(StorageOrder) == int(ColMajor) ? 1 : Dynamic> ColInnerStride;
typedef InnerStride<int(StorageOrder) == int(ColMajor) ? Dynamic : 1> RowInnerStride;
typedef Ref<Matrix<Scalar, Dynamic, 1>, 0, ColInnerStride> SubColumnType;
typedef Ref<Matrix<Scalar, 1, Dynamic>, 0, RowInnerStride> SubRowType;
typedef Ref<Matrix<Scalar, Dynamic, Dynamic, StorageOrder > > SubMatType;
Index brows = A.rows();
Index bcols = A.cols();
Scalar tau_u, tau_u_prev(0), tau_v;
for(Index k = 0; k < bs; ++k)
{
Index remainingRows = brows - k;
Index remainingCols = bcols - k - 1;
SubMatType X_k1( X.block(k,0, remainingRows,k) );
SubMatType V_k1( A.block(k,0, remainingRows,k) );
// 1 - update the k-th column of A
SubColumnType v_k = A.col(k).tail(remainingRows);
v_k -= V_k1 * Y.row(k).head(k).adjoint();
if(k) v_k -= X_k1 * A.col(k).head(k);
// 2 - construct left Householder transform in-place
v_k.makeHouseholderInPlace(tau_v, diagonal[k]);
if(k+1<bcols)
{
SubMatType Y_k ( Y.block(k+1,0, remainingCols, k+1) );
SubMatType U_k1 ( A.block(0,k+1, k,remainingCols) );
      // this eases the application of Householder transformations
// A(k,k) will store tau_v later
A(k,k) = Scalar(1);
// 3 - Compute y_k^T = tau_v * ( A^T*v_k - Y_k-1*V_k-1^T*v_k - U_k-1*X_k-1^T*v_k )
{
SubColumnType y_k( Y.col(k).tail(remainingCols) );
        // let's use the beginning of column k of Y as a temporary vector
SubColumnType tmp( Y.col(k).head(k) );
y_k.noalias() = A.block(k,k+1, remainingRows,remainingCols).adjoint() * v_k; // bottleneck
tmp.noalias() = V_k1.adjoint() * v_k;
y_k.noalias() -= Y_k.leftCols(k) * tmp;
tmp.noalias() = X_k1.adjoint() * v_k;
y_k.noalias() -= U_k1.adjoint() * tmp;
y_k *= numext::conj(tau_v);
}
// 4 - update k-th row of A (it will become u_k)
SubRowType u_k( A.row(k).tail(remainingCols) );
u_k = u_k.conjugate();
{
u_k -= Y_k * A.row(k).head(k+1).adjoint();
if(k) u_k -= U_k1.adjoint() * X.row(k).head(k).adjoint();
}
// 5 - construct right Householder transform in-place
u_k.makeHouseholderInPlace(tau_u, upper_diagonal[k]);
// this eases the application of Householder transformations
// A(k,k+1) will store tau_u later
A(k,k+1) = Scalar(1);
// 6 - Compute x_k = tau_u * ( A*u_k - X_k-1*U_k-1^T*u_k - V_k*Y_k^T*u_k )
{
SubColumnType x_k ( X.col(k).tail(remainingRows-1) );
        // let's use the beginning of column k of X as temporary vectors
        // note that tmp0 and tmp1 overlap
SubColumnType tmp0 ( X.col(k).head(k) ),
tmp1 ( X.col(k).head(k+1) );
x_k.noalias() = A.block(k+1,k+1, remainingRows-1,remainingCols) * u_k.transpose(); // bottleneck
tmp0.noalias() = U_k1 * u_k.transpose();
x_k.noalias() -= X_k1.bottomRows(remainingRows-1) * tmp0;
tmp1.noalias() = Y_k.adjoint() * u_k.transpose();
x_k.noalias() -= A.block(k+1,0, remainingRows-1,k+1) * tmp1;
x_k *= numext::conj(tau_u);
tau_u = numext::conj(tau_u);
u_k = u_k.conjugate();
}
if(k>0) A.coeffRef(k-1,k) = tau_u_prev;
tau_u_prev = tau_u;
}
else
A.coeffRef(k-1,k) = tau_u_prev;
A.coeffRef(k,k) = tau_v;
}
if(bs<bcols)
A.coeffRef(bs-1,bs) = tau_u_prev;
  // update the bottom-right block A11
if(bcols>bs && brows>bs)
{
SubMatType A11( A.bottomRightCorner(brows-bs,bcols-bs) );
SubMatType A10( A.block(bs,0, brows-bs,bs) );
SubMatType A01( A.block(0,bs, bs,bcols-bs) );
Scalar tmp = A01(bs-1,0);
A01(bs-1,0) = Literal(1);
A11.noalias() -= A10 * Y.topLeftCorner(bcols,bs).bottomRows(bcols-bs).adjoint();
A11.noalias() -= X.topLeftCorner(brows,bs).bottomRows(brows-bs) * A01;
A01(bs-1,0) = tmp;
}
}
/** \internal
*
* Implementation of a block-bidiagonal reduction.
* It is based on the following paper:
* The Design of a Parallel Dense Linear Algebra Software Library: Reduction to Hessenberg, Tridiagonal, and Bidiagonal Form.
* by Jaeyoung Choi, Jack J. Dongarra, David W. Walker. (1995)
* section 3.3
*/
template<typename MatrixType, typename BidiagType>
void upperbidiagonalization_inplace_blocked(MatrixType& A, BidiagType& bidiagonal,
Index maxBlockSize=32,
typename MatrixType::Scalar* /*tempData*/ = 0)
{
typedef typename MatrixType::Scalar Scalar;
typedef Block<MatrixType,Dynamic,Dynamic> BlockType;
Index rows = A.rows();
Index cols = A.cols();
Index size = (std::min)(rows, cols);
// X and Y are work space
enum { StorageOrder = traits<MatrixType>::Flags & RowMajorBit };
Matrix<Scalar,
MatrixType::RowsAtCompileTime,
Dynamic,
StorageOrder,
MatrixType::MaxRowsAtCompileTime> X(rows,maxBlockSize);
Matrix<Scalar,
MatrixType::ColsAtCompileTime,
Dynamic,
StorageOrder,
MatrixType::MaxColsAtCompileTime> Y(cols,maxBlockSize);
Index blockSize = (std::min)(maxBlockSize,size);
Index k = 0;
for(k = 0; k < size; k += blockSize)
{
Index bs = (std::min)(size-k,blockSize); // actual size of the block
Index brows = rows - k; // rows of the block
Index bcols = cols - k; // columns of the block
// partition the matrix A:
//
// | A00 A01 A02 |
// | |
// A = | A10 A11 A12 |
// | |
// | A20 A21 A22 |
//
// where A11 is a bs x bs diagonal block,
// and let:
// | A11 A12 |
// B = | |
// | A21 A22 |
BlockType B = A.block(k,k,brows,bcols);
    // This stage performs the bidiagonalization of A11, A21, A12, and the update of A22.
    // The algorithm then continues on the updated A22.
    //
    // However, if B is too small, or A22 is empty, then let's use an unblocked strategy
if(k+bs==cols || bcols<48) // somewhat arbitrary threshold
{
upperbidiagonalization_inplace_unblocked(B,
&(bidiagonal.template diagonal<0>().coeffRef(k)),
&(bidiagonal.template diagonal<1>().coeffRef(k)),
X.data()
);
break; // We're done
}
else
{
upperbidiagonalization_blocked_helper<BlockType>( B,
&(bidiagonal.template diagonal<0>().coeffRef(k)),
&(bidiagonal.template diagonal<1>().coeffRef(k)),
bs,
X.topLeftCorner(brows,bs),
Y.topLeftCorner(bcols,bs)
);
}
}
}
template<typename _MatrixType>
UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::computeUnblocked(const _MatrixType& matrix)
{
Index rows = matrix.rows();
Index cols = matrix.cols();
EIGEN_ONLY_USED_FOR_DEBUG(cols);
eigen_assert(rows >= cols && "UpperBidiagonalization is only for Arices satisfying rows>=cols.");
m_householder = matrix;
ColVectorType temp(rows);
upperbidiagonalization_inplace_unblocked(m_householder,
&(m_bidiagonal.template diagonal<0>().coeffRef(0)),
&(m_bidiagonal.template diagonal<1>().coeffRef(0)),
temp.data());
m_isInitialized = true;
return *this;
}
template<typename _MatrixType>
UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::compute(const _MatrixType& matrix)
{
Index rows = matrix.rows();
Index cols = matrix.cols();
EIGEN_ONLY_USED_FOR_DEBUG(rows);
EIGEN_ONLY_USED_FOR_DEBUG(cols);
eigen_assert(rows >= cols && "UpperBidiagonalization is only for Arices satisfying rows>=cols.");
m_householder = matrix;
upperbidiagonalization_inplace_blocked(m_householder, m_bidiagonal);
m_isInitialized = true;
return *this;
}
#if 0
/** \return the upper bidiagonalization of \c *this.
*
* \sa class Bidiagonalization
*/
template<typename Derived>
const UpperBidiagonalization<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::bidiagonalization() const
{
return UpperBidiagonalization<PlainObject>(eval());
}
#endif
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_BIDIAGONALIZATION_H
| 15,957 | 37.453012 | 128 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2012 Gael Guennebaud <[email protected]>
/*
 NOTE: these functions have been adapted from the LDL library:
LDL Copyright (c) 2005 by Timothy A. Davis. All Rights Reserved.
LDL License:
Your use or distribution of LDL or any modified version of
LDL implies that you agree to this License.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
Permission is hereby granted to use or copy this program under the
terms of the GNU LGPL, provided that the Copyright, this License,
and the Availability of the original version is retained on all copies.
User documentation of any code that uses this code or any modified
version of this code must cite the Copyright, this License, the
Availability note, and "Used by permission." Permission to modify
the code and to distribute modified code is granted, provided the
Copyright, this License, and the Availability note are retained,
and a notice that the code was modified is included.
*/
#include "../Core/util/NonMPL2.h"
#ifndef EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H
#define EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H
namespace Eigen {
template<typename Derived>
void SimplicialCholeskyBase<Derived>::analyzePattern_preordered(const CholMatrixType& ap, bool doLDLT)
{
const StorageIndex size = StorageIndex(ap.rows());
m_matrix.resize(size, size);
m_parent.resize(size);
m_nonZerosPerCol.resize(size);
ei_declare_aligned_stack_constructed_variable(StorageIndex, tags, size, 0);
for(StorageIndex k = 0; k < size; ++k)
{
/* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */
m_parent[k] = -1; /* parent of k is not yet known */
tags[k] = k; /* mark node k as visited */
m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */
for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it)
{
StorageIndex i = it.index();
if(i < k)
{
/* follow path from i to root of etree, stop at flagged node */
for(; tags[i] != k; i = m_parent[i])
{
/* find parent of i if not yet determined */
if (m_parent[i] == -1)
m_parent[i] = k;
m_nonZerosPerCol[i]++; /* L (k,i) is nonzero */
tags[i] = k; /* mark i as visited */
}
}
}
}
/* construct Lp index array from m_nonZerosPerCol column counts */
StorageIndex* Lp = m_matrix.outerIndexPtr();
Lp[0] = 0;
for(StorageIndex k = 0; k < size; ++k)
Lp[k+1] = Lp[k] + m_nonZerosPerCol[k] + (doLDLT ? 0 : 1);
m_matrix.resizeNonZeros(Lp[size]);
m_isInitialized = true;
m_info = Success;
m_analysisIsOk = true;
m_factorizationIsOk = false;
}
template<typename Derived>
template<bool DoLDLT>
void SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType& ap)
{
using std::sqrt;
eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
eigen_assert(ap.rows()==ap.cols());
eigen_assert(m_parent.size()==ap.rows());
eigen_assert(m_nonZerosPerCol.size()==ap.rows());
const StorageIndex size = StorageIndex(ap.rows());
const StorageIndex* Lp = m_matrix.outerIndexPtr();
StorageIndex* Li = m_matrix.innerIndexPtr();
Scalar* Lx = m_matrix.valuePtr();
ei_declare_aligned_stack_constructed_variable(Scalar, y, size, 0);
ei_declare_aligned_stack_constructed_variable(StorageIndex, pattern, size, 0);
ei_declare_aligned_stack_constructed_variable(StorageIndex, tags, size, 0);
bool ok = true;
m_diag.resize(DoLDLT ? size : 0);
for(StorageIndex k = 0; k < size; ++k)
{
// compute nonzero pattern of kth row of L, in topological order
y[k] = 0.0; // Y(0:k) is now all zero
StorageIndex top = size; // stack for pattern is empty
tags[k] = k; // mark node k as visited
m_nonZerosPerCol[k] = 0; // count of nonzeros in column k of L
for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it)
{
StorageIndex i = it.index();
if(i <= k)
{
y[i] += numext::conj(it.value()); /* scatter A(i,k) into Y (sum duplicates) */
Index len;
for(len = 0; tags[i] != k; i = m_parent[i])
{
pattern[len++] = i; /* L(k,i) is nonzero */
tags[i] = k; /* mark i as visited */
}
while(len > 0)
pattern[--top] = pattern[--len];
}
}
    /* compute numerical values of the kth row of L (a sparse triangular solve) */
RealScalar d = numext::real(y[k]) * m_shiftScale + m_shiftOffset; // get D(k,k), apply the shift function, and clear Y(k)
y[k] = 0.0;
for(; top < size; ++top)
{
Index i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */
Scalar yi = y[i]; /* get and clear Y(i) */
y[i] = 0.0;
/* the nonzero entry L(k,i) */
Scalar l_ki;
if(DoLDLT)
l_ki = yi / m_diag[i];
else
yi = l_ki = yi / Lx[Lp[i]];
Index p2 = Lp[i] + m_nonZerosPerCol[i];
Index p;
for(p = Lp[i] + (DoLDLT ? 0 : 1); p < p2; ++p)
y[Li[p]] -= numext::conj(Lx[p]) * yi;
d -= numext::real(l_ki * numext::conj(yi));
Li[p] = k; /* store L(k,i) in column form of L */
Lx[p] = l_ki;
++m_nonZerosPerCol[i]; /* increment count of nonzeros in col i */
}
if(DoLDLT)
{
m_diag[k] = d;
if(d == RealScalar(0))
{
ok = false; /* failure, D(k,k) is zero */
break;
}
}
else
{
Index p = Lp[k] + m_nonZerosPerCol[k]++;
Li[p] = k ; /* store L(k,k) = sqrt (d) in column k */
if(d <= RealScalar(0)) {
ok = false; /* failure, matrix is not positive definite */
break;
}
Lx[p] = sqrt(d) ;
}
}
m_info = ok ? Success : NumericalIssue;
m_factorizationIsOk = true;
}
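/** \internal
  * Usage sketch of the solvers built on top of these kernels (illustration only; it assumes
  * <Eigen/SparseCholesky> is included and \c A is a symmetric positive definite SparseMatrix):
  * \code
  * SparseMatrix<double> A;   // filled elsewhere
  * VectorXd b;               // right-hand side
  * SimplicialLDLT<SparseMatrix<double> > solver(A);  // analyzePattern() + factorize()
  * VectorXd x;
  * if(solver.info() == Success)
  *   x = solver.solve(b);
  * \endcode
  */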
} // end namespace Eigen
#endif // EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H
| 6,898 | 33.495 | 128 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/SparseCore/CompressedStorage.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_COMPRESSED_STORAGE_H
#define EIGEN_COMPRESSED_STORAGE_H
namespace Eigen {
namespace internal {
/** \internal
* Stores a sparse set of values as a list of values and a list of indices.
*
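 * A rough usage sketch (internal API, illustration only; indices must be appended in
 * increasing order for the binary-search based lookups below to be valid):
 * \code
 * internal::CompressedStorage<double,int> s;
 * s.append(1.5, 2);            // value 1.5 at index 2
 * s.append(2.5, 7);
 * double a = s.at(7);          // 2.5
 * double b = s.at(3, 0.0);     // index 3 is not stored: returns the default 0.0
 * \endcode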
*/
template<typename _Scalar,typename _StorageIndex>
class CompressedStorage
{
public:
typedef _Scalar Scalar;
typedef _StorageIndex StorageIndex;
protected:
typedef typename NumTraits<Scalar>::Real RealScalar;
public:
CompressedStorage()
: m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
{}
explicit CompressedStorage(Index size)
: m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
{
resize(size);
}
CompressedStorage(const CompressedStorage& other)
: m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
{
*this = other;
}
CompressedStorage& operator=(const CompressedStorage& other)
{
resize(other.size());
if(other.size()>0)
{
internal::smart_copy(other.m_values, other.m_values + m_size, m_values);
internal::smart_copy(other.m_indices, other.m_indices + m_size, m_indices);
}
return *this;
}
void swap(CompressedStorage& other)
{
std::swap(m_values, other.m_values);
std::swap(m_indices, other.m_indices);
std::swap(m_size, other.m_size);
std::swap(m_allocatedSize, other.m_allocatedSize);
}
~CompressedStorage()
{
delete[] m_values;
delete[] m_indices;
}
void reserve(Index size)
{
Index newAllocatedSize = m_size + size;
if (newAllocatedSize > m_allocatedSize)
reallocate(newAllocatedSize);
}
void squeeze()
{
if (m_allocatedSize>m_size)
reallocate(m_size);
}
void resize(Index size, double reserveSizeFactor = 0)
{
if (m_allocatedSize<size)
{
Index realloc_size = (std::min<Index>)(NumTraits<StorageIndex>::highest(), size + Index(reserveSizeFactor*double(size)));
if(realloc_size<size)
internal::throw_std_bad_alloc();
reallocate(realloc_size);
}
m_size = size;
}
void append(const Scalar& v, Index i)
{
Index id = m_size;
resize(m_size+1, 1);
m_values[id] = v;
m_indices[id] = internal::convert_index<StorageIndex>(i);
}
inline Index size() const { return m_size; }
inline Index allocatedSize() const { return m_allocatedSize; }
inline void clear() { m_size = 0; }
const Scalar* valuePtr() const { return m_values; }
Scalar* valuePtr() { return m_values; }
const StorageIndex* indexPtr() const { return m_indices; }
StorageIndex* indexPtr() { return m_indices; }
inline Scalar& value(Index i) { eigen_internal_assert(m_values!=0); return m_values[i]; }
inline const Scalar& value(Index i) const { eigen_internal_assert(m_values!=0); return m_values[i]; }
inline StorageIndex& index(Index i) { eigen_internal_assert(m_indices!=0); return m_indices[i]; }
inline const StorageIndex& index(Index i) const { eigen_internal_assert(m_indices!=0); return m_indices[i]; }
/** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */
inline Index searchLowerIndex(Index key) const
{
return searchLowerIndex(0, m_size, key);
}
/** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */
inline Index searchLowerIndex(Index start, Index end, Index key) const
{
while(end>start)
{
Index mid = (end+start)>>1;
if (m_indices[mid]<key)
start = mid+1;
else
end = mid;
}
return start;
}
/** \returns the stored value at index \a key
* If the value does not exist, then the value \a defaultValue is returned without any insertion. */
inline Scalar at(Index key, const Scalar& defaultValue = Scalar(0)) const
{
if (m_size==0)
return defaultValue;
else if (key==m_indices[m_size-1])
return m_values[m_size-1];
// ^^ optimization: let's first check if it is the last coefficient
// (very common in high level algorithms)
const Index id = searchLowerIndex(0,m_size-1,key);
return ((id<m_size) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
}
/** Like at(), but the search is performed in the range [start,end) */
inline Scalar atInRange(Index start, Index end, Index key, const Scalar &defaultValue = Scalar(0)) const
{
if (start>=end)
return defaultValue;
else if (end>start && key==m_indices[end-1])
return m_values[end-1];
// ^^ optimization: let's first check if it is the last coefficient
// (very common in high level algorithms)
const Index id = searchLowerIndex(start,end-1,key);
return ((id<end) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
}
/** \returns a reference to the value at index \a key
* If the value does not exist, then the value \a defaultValue is inserted
* such that the keys are sorted. */
inline Scalar& atWithInsertion(Index key, const Scalar& defaultValue = Scalar(0))
{
Index id = searchLowerIndex(0,m_size,key);
if (id>=m_size || m_indices[id]!=key)
{
if (m_allocatedSize<m_size+1)
{
m_allocatedSize = 2*(m_size+1);
internal::scoped_array<Scalar> newValues(m_allocatedSize);
internal::scoped_array<StorageIndex> newIndices(m_allocatedSize);
// copy first chunk
internal::smart_copy(m_values, m_values +id, newValues.ptr());
internal::smart_copy(m_indices, m_indices+id, newIndices.ptr());
// copy the rest
if(m_size>id)
{
internal::smart_copy(m_values +id, m_values +m_size, newValues.ptr() +id+1);
internal::smart_copy(m_indices+id, m_indices+m_size, newIndices.ptr()+id+1);
}
std::swap(m_values,newValues.ptr());
std::swap(m_indices,newIndices.ptr());
}
else if(m_size>id)
{
internal::smart_memmove(m_values +id, m_values +m_size, m_values +id+1);
internal::smart_memmove(m_indices+id, m_indices+m_size, m_indices+id+1);
}
m_size++;
m_indices[id] = internal::convert_index<StorageIndex>(key);
m_values[id] = defaultValue;
}
return m_values[id];
}
void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
{
Index k = 0;
Index n = size();
for (Index i=0; i<n; ++i)
{
if (!internal::isMuchSmallerThan(value(i), reference, epsilon))
{
value(k) = value(i);
index(k) = index(i);
++k;
}
}
resize(k,0);
}
protected:
inline void reallocate(Index size)
{
#ifdef EIGEN_SPARSE_COMPRESSED_STORAGE_REALLOCATE_PLUGIN
EIGEN_SPARSE_COMPRESSED_STORAGE_REALLOCATE_PLUGIN
#endif
eigen_internal_assert(size!=m_allocatedSize);
internal::scoped_array<Scalar> newValues(size);
internal::scoped_array<StorageIndex> newIndices(size);
Index copySize = (std::min)(size, m_size);
if (copySize>0) {
internal::smart_copy(m_values, m_values+copySize, newValues.ptr());
internal::smart_copy(m_indices, m_indices+copySize, newIndices.ptr());
}
std::swap(m_values,newValues.ptr());
std::swap(m_indices,newIndices.ptr());
m_allocatedSize = size;
}
protected:
Scalar* m_values;
StorageIndex* m_indices;
Index m_size;
Index m_allocatedSize;
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_COMPRESSED_STORAGE_H
| 8,164 | 30.525097 | 130 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_CWISE_UNARY_OP_H
#define EIGEN_SPARSE_CWISE_UNARY_OP_H
namespace Eigen {
namespace internal {
template<typename UnaryOp, typename ArgType>
struct unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>
: public evaluator_base<CwiseUnaryOp<UnaryOp,ArgType> >
{
public:
typedef CwiseUnaryOp<UnaryOp, ArgType> XprType;
class InnerIterator;
enum {
CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<UnaryOp>::Cost,
Flags = XprType::Flags
};
explicit unary_evaluator(const XprType& op) : m_functor(op.functor()), m_argImpl(op.nestedExpression())
{
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
inline Index nonZerosEstimate() const {
return m_argImpl.nonZerosEstimate();
}
protected:
typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
const UnaryOp m_functor;
evaluator<ArgType> m_argImpl;
};
template<typename UnaryOp, typename ArgType>
class unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::InnerIterator
: public unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::EvalIterator
{
typedef typename XprType::Scalar Scalar;
typedef typename unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::EvalIterator Base;
public:
EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer)
: Base(unaryOp.m_argImpl,outer), m_functor(unaryOp.m_functor)
{}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{ Base::operator++(); return *this; }
EIGEN_STRONG_INLINE Scalar value() const { return m_functor(Base::value()); }
protected:
const UnaryOp m_functor;
private:
Scalar& valueRef();
};
template<typename ViewOp, typename ArgType>
struct unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>
: public evaluator_base<CwiseUnaryView<ViewOp,ArgType> >
{
public:
typedef CwiseUnaryView<ViewOp, ArgType> XprType;
class InnerIterator;
enum {
CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<ViewOp>::Cost,
Flags = XprType::Flags
};
explicit unary_evaluator(const XprType& op) : m_functor(op.functor()), m_argImpl(op.nestedExpression())
{
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<ViewOp>::Cost);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
protected:
typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
const ViewOp m_functor;
evaluator<ArgType> m_argImpl;
};
template<typename ViewOp, typename ArgType>
class unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::InnerIterator
: public unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::EvalIterator
{
typedef typename XprType::Scalar Scalar;
typedef typename unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::EvalIterator Base;
public:
EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer)
: Base(unaryOp.m_argImpl,outer), m_functor(unaryOp.m_functor)
{}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{ Base::operator++(); return *this; }
EIGEN_STRONG_INLINE Scalar value() const { return m_functor(Base::value()); }
EIGEN_STRONG_INLINE Scalar& valueRef() { return m_functor(Base::valueRef()); }
protected:
const ViewOp m_functor;
};
} // end namespace internal
template<typename Derived>
EIGEN_STRONG_INLINE Derived&
SparseMatrixBase<Derived>::operator*=(const Scalar& other)
{
typedef typename internal::evaluator<Derived>::InnerIterator EvalIterator;
internal::evaluator<Derived> thisEval(derived());
for (Index j=0; j<outerSize(); ++j)
for (EvalIterator i(thisEval,j); i; ++i)
i.valueRef() *= other;
return derived();
}
template<typename Derived>
EIGEN_STRONG_INLINE Derived&
SparseMatrixBase<Derived>::operator/=(const Scalar& other)
{
typedef typename internal::evaluator<Derived>::InnerIterator EvalIterator;
internal::evaluator<Derived> thisEval(derived());
for (Index j=0; j<outerSize(); ++j)
for (EvalIterator i(thisEval,j); i; ++i)
i.valueRef() /= other;
return derived();
}
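/** \internal
  * Usage sketch (illustration only): the two operators above rescale the stored nonzeros in place.
  * \code
  * SparseMatrix<double> m(100,100);
  * // ... fill m ...
  * m *= 2.0;
  * m /= 4.0;
  * \endcode
  */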
} // end namespace Eigen
#endif // EIGEN_SPARSE_CWISE_UNARY_OP_H
| 4,711 | 30.624161 | 107 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/SparseCore/SparseDot.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_DOT_H
#define EIGEN_SPARSE_DOT_H
namespace Eigen {
template<typename Derived>
template<typename OtherDerived>
typename internal::traits<Derived>::Scalar
SparseMatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
eigen_assert(size() == other.size());
eigen_assert(other.size()>0 && "you are using a non initialized vector");
internal::evaluator<Derived> thisEval(derived());
typename internal::evaluator<Derived>::InnerIterator i(thisEval, 0);
Scalar res(0);
while (i)
{
res += numext::conj(i.value()) * other.coeff(i.index());
++i;
}
return res;
}
template<typename Derived>
template<typename OtherDerived>
typename internal::traits<Derived>::Scalar
SparseMatrixBase<Derived>::dot(const SparseMatrixBase<OtherDerived>& other) const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
eigen_assert(size() == other.size());
internal::evaluator<Derived> thisEval(derived());
typename internal::evaluator<Derived>::InnerIterator i(thisEval, 0);
internal::evaluator<OtherDerived> otherEval(other.derived());
typename internal::evaluator<OtherDerived>::InnerIterator j(otherEval, 0);
Scalar res(0);
while (i && j)
{
if (i.index()==j.index())
{
res += numext::conj(i.value()) * j.value();
++i; ++j;
}
else if (i.index()<j.index())
++i;
else
++j;
}
return res;
}
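/** \internal
  * Usage sketch (illustration only):
  * \code
  * SparseVector<double> a(100), b(100);
  * // ... fill a and b ...
  * double d = a.dot(b);          // sparse-sparse dot product via index merging
  * double s = a.squaredNorm();   // for real scalars, equals a.dot(a)
  * \endcode
  */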
template<typename Derived>
inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
SparseMatrixBase<Derived>::squaredNorm() const
{
return numext::real((*this).cwiseAbs2().sum());
}
template<typename Derived>
inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
SparseMatrixBase<Derived>::norm() const
{
using std::sqrt;
return sqrt(squaredNorm());
}
template<typename Derived>
inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
SparseMatrixBase<Derived>::blueNorm() const
{
return internal::blueNorm_impl(*this);
}
} // end namespace Eigen
#endif // EIGEN_SPARSE_DOT_H
| 3,080 | 30.121212 | 118 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/SparseCore/SparseFuzzy.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_FUZZY_H
#define EIGEN_SPARSE_FUZZY_H
namespace Eigen {
template<typename Derived>
template<typename OtherDerived>
bool SparseMatrixBase<Derived>::isApprox(const SparseMatrixBase<OtherDerived>& other, const RealScalar &prec) const
{
const typename internal::nested_eval<Derived,2,PlainObject>::type actualA(derived());
typename internal::conditional<bool(IsRowMajor)==bool(OtherDerived::IsRowMajor),
const typename internal::nested_eval<OtherDerived,2,PlainObject>::type,
const PlainObject>::type actualB(other.derived());
return (actualA - actualB).squaredNorm() <= prec * prec * numext::mini(actualA.squaredNorm(), actualB.squaredNorm());
}
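/** \internal
  * Usage sketch (illustration only):
  * \code
  * SparseMatrix<double> A(100,100), B(100,100);
  * // ... fill A and B ...
  * bool close = A.isApprox(B, 1e-12);
  * \endcode
  */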
} // end namespace Eigen
#endif // EIGEN_SPARSE_FUZZY_H
| 1,107 | 35.933333 | 119 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/SparseCore/SparseRedux.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSEREDUX_H
#define EIGEN_SPARSEREDUX_H
namespace Eigen {
template<typename Derived>
typename internal::traits<Derived>::Scalar
SparseMatrixBase<Derived>::sum() const
{
eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix");
Scalar res(0);
internal::evaluator<Derived> thisEval(derived());
for (Index j=0; j<outerSize(); ++j)
for (typename internal::evaluator<Derived>::InnerIterator iter(thisEval,j); iter; ++iter)
res += iter.value();
return res;
}
template<typename _Scalar, int _Options, typename _Index>
typename internal::traits<SparseMatrix<_Scalar,_Options,_Index> >::Scalar
SparseMatrix<_Scalar,_Options,_Index>::sum() const
{
eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix");
if(this->isCompressed())
return Matrix<Scalar,1,Dynamic>::Map(m_data.valuePtr(), m_data.size()).sum();
else
return Base::sum();
}
template<typename _Scalar, int _Options, typename _Index>
typename internal::traits<SparseVector<_Scalar,_Options, _Index> >::Scalar
SparseVector<_Scalar,_Options,_Index>::sum() const
{
eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix");
return Matrix<Scalar,1,Dynamic>::Map(m_data.valuePtr(), m_data.size()).sum();
}
} // end namespace Eigen
#endif // EIGEN_SPARSEREDUX_H
| 1,699 | 33 | 93 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/SparseCore/SparseSolverBase.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSESOLVERBASE_H
#define EIGEN_SPARSESOLVERBASE_H
namespace Eigen {
namespace internal {
/** \internal
* Helper functions to solve with a sparse right-hand-side and result.
* The rhs is decomposed into small vertical panels which are solved through dense temporaries.
*/
template<typename Decomposition, typename Rhs, typename Dest>
typename enable_if<Rhs::ColsAtCompileTime!=1 && Dest::ColsAtCompileTime!=1>::type
solve_sparse_through_dense_panels(const Decomposition &dec, const Rhs& rhs, Dest &dest)
{
EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
typedef typename Dest::Scalar DestScalar;
  // we process the sparse rhs in blocks of NbColsAtOnce columns, which are temporarily stored into a dense matrix.
static const Index NbColsAtOnce = 4;
Index rhsCols = rhs.cols();
Index size = rhs.rows();
// the temporary matrices do not need more columns than NbColsAtOnce:
Index tmpCols = (std::min)(rhsCols, NbColsAtOnce);
Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmp(size,tmpCols);
Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmpX(size,tmpCols);
for(Index k=0; k<rhsCols; k+=NbColsAtOnce)
{
Index actualCols = std::min<Index>(rhsCols-k, NbColsAtOnce);
tmp.leftCols(actualCols) = rhs.middleCols(k,actualCols);
tmpX.leftCols(actualCols) = dec.solve(tmp.leftCols(actualCols));
dest.middleCols(k,actualCols) = tmpX.leftCols(actualCols).sparseView();
}
}
// Overload for vector as rhs
template<typename Decomposition, typename Rhs, typename Dest>
typename enable_if<Rhs::ColsAtCompileTime==1 || Dest::ColsAtCompileTime==1>::type
solve_sparse_through_dense_panels(const Decomposition &dec, const Rhs& rhs, Dest &dest)
{
typedef typename Dest::Scalar DestScalar;
Index size = rhs.rows();
Eigen::Matrix<DestScalar,Dynamic,1> rhs_dense(rhs);
Eigen::Matrix<DestScalar,Dynamic,1> dest_dense(size);
dest_dense = dec.solve(rhs_dense);
dest = dest_dense.sparseView();
}
} // end namespace internal
/** \class SparseSolverBase
* \ingroup SparseCore_Module
* \brief A base class for sparse solvers
*
* \tparam Derived the actual type of the solver.
*
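  * A usage sketch with one concrete solver (SimplicialLDLT is used here purely as an
  * illustration; any solver deriving from SparseSolverBase exposes the same solve() API):
  * \code
  * SparseMatrix<double> A, B;  // A: system matrix, B: sparse right-hand sides, filled elsewhere
  * VectorXd b;
  * SimplicialLDLT<SparseMatrix<double> > solver(A);
  * VectorXd x = solver.solve(b);               // dense right-hand side
  * SparseMatrix<double> X = solver.solve(B);   // sparse right-hand side, solved per dense panel
  * \endcode
  *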
*/
template<typename Derived>
class SparseSolverBase : internal::noncopyable
{
public:
/** Default constructor */
SparseSolverBase()
: m_isInitialized(false)
{}
~SparseSolverBase()
{}
Derived& derived() { return *static_cast<Derived*>(this); }
const Derived& derived() const { return *static_cast<const Derived*>(this); }
/** \returns an expression of the solution x of \f$ A x = b \f$ using the current decomposition of A.
*
* \sa compute()
*/
template<typename Rhs>
inline const Solve<Derived, Rhs>
solve(const MatrixBase<Rhs>& b) const
{
eigen_assert(m_isInitialized && "Solver is not initialized.");
eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b");
return Solve<Derived, Rhs>(derived(), b.derived());
}
/** \returns an expression of the solution x of \f$ A x = b \f$ using the current decomposition of A.
*
* \sa compute()
*/
template<typename Rhs>
inline const Solve<Derived, Rhs>
solve(const SparseMatrixBase<Rhs>& b) const
{
eigen_assert(m_isInitialized && "Solver is not initialized.");
eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b");
return Solve<Derived, Rhs>(derived(), b.derived());
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** \internal default implementation of solving with a sparse rhs */
template<typename Rhs,typename Dest>
void _solve_impl(const SparseMatrixBase<Rhs> &b, SparseMatrixBase<Dest> &dest) const
{
internal::solve_sparse_through_dense_panels(derived(), b.derived(), dest.derived());
}
#endif // EIGEN_PARSED_BY_DOXYGEN
protected:
mutable bool m_isInitialized;
};
} // end namespace Eigen
#endif // EIGEN_SPARSESOLVERBASE_H
| 4,424 | 34.4 | 116 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/SparseCore/SparseTranspose.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSETRANSPOSE_H
#define EIGEN_SPARSETRANSPOSE_H
namespace Eigen {
namespace internal {
template<typename MatrixType,int CompressedAccess=int(MatrixType::Flags&CompressedAccessBit)>
class SparseTransposeImpl
: public SparseMatrixBase<Transpose<MatrixType> >
{};
template<typename MatrixType>
class SparseTransposeImpl<MatrixType,CompressedAccessBit>
: public SparseCompressedBase<Transpose<MatrixType> >
{
typedef SparseCompressedBase<Transpose<MatrixType> > Base;
public:
using Base::derived;
typedef typename Base::Scalar Scalar;
typedef typename Base::StorageIndex StorageIndex;
inline Index nonZeros() const { return derived().nestedExpression().nonZeros(); }
inline const Scalar* valuePtr() const { return derived().nestedExpression().valuePtr(); }
inline const StorageIndex* innerIndexPtr() const { return derived().nestedExpression().innerIndexPtr(); }
inline const StorageIndex* outerIndexPtr() const { return derived().nestedExpression().outerIndexPtr(); }
inline const StorageIndex* innerNonZeroPtr() const { return derived().nestedExpression().innerNonZeroPtr(); }
inline Scalar* valuePtr() { return derived().nestedExpression().valuePtr(); }
inline StorageIndex* innerIndexPtr() { return derived().nestedExpression().innerIndexPtr(); }
inline StorageIndex* outerIndexPtr() { return derived().nestedExpression().outerIndexPtr(); }
inline StorageIndex* innerNonZeroPtr() { return derived().nestedExpression().innerNonZeroPtr(); }
};
}
template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>
: public internal::SparseTransposeImpl<MatrixType>
{
protected:
typedef internal::SparseTransposeImpl<MatrixType> Base;
};
namespace internal {
template<typename ArgType>
struct unary_evaluator<Transpose<ArgType>, IteratorBased>
: public evaluator_base<Transpose<ArgType> >
{
typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
public:
typedef Transpose<ArgType> XprType;
inline Index nonZerosEstimate() const {
return m_argImpl.nonZerosEstimate();
}
class InnerIterator : public EvalIterator
{
public:
EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer)
: EvalIterator(unaryOp.m_argImpl,outer)
{}
Index row() const { return EvalIterator::col(); }
Index col() const { return EvalIterator::row(); }
};
enum {
CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
Flags = XprType::Flags
};
explicit unary_evaluator(const XprType& op) :m_argImpl(op.nestedExpression()) {}
protected:
evaluator<ArgType> m_argImpl;
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSETRANSPOSE_H
| 3,175 | 33.150538 | 113 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/SparseCore/SparseTriangularView.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2015 Gael Guennebaud <[email protected]>
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_TRIANGULARVIEW_H
#define EIGEN_SPARSE_TRIANGULARVIEW_H
namespace Eigen {
/** \ingroup SparseCore_Module
*
* \brief Base class for a triangular part in a \b sparse matrix
*
* This class is an abstract base class of class TriangularView, and objects of type TriangularViewImpl cannot be instantiated.
* It extends class TriangularView with additional methods which are available for sparse expressions only.
*
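  * A usage sketch (illustration only):
  * \code
  * SparseMatrix<double> A;   // square, with nonzero diagonal entries, filled elsewhere
  * VectorXd b;
  * VectorXd x = b;
  * A.triangularView<Lower>().solveInPlace(x);  // solves A.triangularView<Lower>() * x = b in-place
  * \endcode
  *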
* \sa class TriangularView, SparseMatrixBase::triangularView()
*/
template<typename MatrixType, unsigned int Mode> class TriangularViewImpl<MatrixType,Mode,Sparse>
: public SparseMatrixBase<TriangularView<MatrixType,Mode> >
{
enum { SkipFirst = ((Mode&Lower) && !(MatrixType::Flags&RowMajorBit))
|| ((Mode&Upper) && (MatrixType::Flags&RowMajorBit)),
SkipLast = !SkipFirst,
SkipDiag = (Mode&ZeroDiag) ? 1 : 0,
HasUnitDiag = (Mode&UnitDiag) ? 1 : 0
};
typedef TriangularView<MatrixType,Mode> TriangularViewType;
protected:
// dummy solve function to make TriangularView happy.
void solve() const;
typedef SparseMatrixBase<TriangularViewType> Base;
public:
EIGEN_SPARSE_PUBLIC_INTERFACE(TriangularViewType)
typedef typename MatrixType::Nested MatrixTypeNested;
typedef typename internal::remove_reference<MatrixTypeNested>::type MatrixTypeNestedNonRef;
typedef typename internal::remove_all<MatrixTypeNested>::type MatrixTypeNestedCleaned;
template<typename RhsType, typename DstType>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void _solve_impl(const RhsType &rhs, DstType &dst) const {
if(!(internal::is_same<RhsType,DstType>::value && internal::extract_data(dst) == internal::extract_data(rhs)))
dst = rhs;
this->solveInPlace(dst);
}
/** Applies the inverse of \c *this to the dense vector or matrix \a other, "in-place" */
template<typename OtherDerived> void solveInPlace(MatrixBase<OtherDerived>& other) const;
/** Applies the inverse of \c *this to the sparse vector or matrix \a other, "in-place" */
template<typename OtherDerived> void solveInPlace(SparseMatrixBase<OtherDerived>& other) const;
};
namespace internal {
template<typename ArgType, unsigned int Mode>
struct unary_evaluator<TriangularView<ArgType,Mode>, IteratorBased>
: evaluator_base<TriangularView<ArgType,Mode> >
{
typedef TriangularView<ArgType,Mode> XprType;
protected:
typedef typename XprType::Scalar Scalar;
typedef typename XprType::StorageIndex StorageIndex;
typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
enum { SkipFirst = ((Mode&Lower) && !(ArgType::Flags&RowMajorBit))
|| ((Mode&Upper) && (ArgType::Flags&RowMajorBit)),
SkipLast = !SkipFirst,
SkipDiag = (Mode&ZeroDiag) ? 1 : 0,
HasUnitDiag = (Mode&UnitDiag) ? 1 : 0
};
public:
enum {
CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
Flags = XprType::Flags
};
explicit unary_evaluator(const XprType &xpr) : m_argImpl(xpr.nestedExpression()), m_arg(xpr.nestedExpression()) {}
inline Index nonZerosEstimate() const {
return m_argImpl.nonZerosEstimate();
}
class InnerIterator : public EvalIterator
{
typedef EvalIterator Base;
public:
EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& xprEval, Index outer)
: Base(xprEval.m_argImpl,outer), m_returnOne(false), m_containsDiag(Base::outer()<xprEval.m_arg.innerSize())
{
if(SkipFirst)
{
while((*this) && ((HasUnitDiag||SkipDiag) ? this->index()<=outer : this->index()<outer))
Base::operator++();
if(HasUnitDiag)
m_returnOne = m_containsDiag;
}
else if(HasUnitDiag && ((!Base::operator bool()) || Base::index()>=Base::outer()))
{
if((!SkipFirst) && Base::operator bool())
Base::operator++();
m_returnOne = m_containsDiag;
}
}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{
if(HasUnitDiag && m_returnOne)
m_returnOne = false;
else
{
Base::operator++();
if(HasUnitDiag && (!SkipFirst) && ((!Base::operator bool()) || Base::index()>=Base::outer()))
{
if((!SkipFirst) && Base::operator bool())
Base::operator++();
m_returnOne = m_containsDiag;
}
}
return *this;
}
EIGEN_STRONG_INLINE operator bool() const
{
if(HasUnitDiag && m_returnOne)
return true;
if(SkipFirst) return Base::operator bool();
else
{
if (SkipDiag) return (Base::operator bool() && this->index() < this->outer());
else return (Base::operator bool() && this->index() <= this->outer());
}
}
// inline Index row() const { return (ArgType::Flags&RowMajorBit ? Base::outer() : this->index()); }
// inline Index col() const { return (ArgType::Flags&RowMajorBit ? this->index() : Base::outer()); }
inline StorageIndex index() const
{
if(HasUnitDiag && m_returnOne) return internal::convert_index<StorageIndex>(Base::outer());
else return Base::index();
}
inline Scalar value() const
{
if(HasUnitDiag && m_returnOne) return Scalar(1);
else return Base::value();
}
protected:
bool m_returnOne;
bool m_containsDiag;
private:
Scalar& valueRef();
};
protected:
evaluator<ArgType> m_argImpl;
const ArgType& m_arg;
};
} // end namespace internal
template<typename Derived>
template<int Mode>
inline const TriangularView<const Derived, Mode>
SparseMatrixBase<Derived>::triangularView() const
{
return TriangularView<const Derived, Mode>(derived());
}
} // end namespace Eigen
#endif // EIGEN_SPARSE_TRIANGULARVIEW_H
| 6,435 | 32.873684 | 128 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/SparseCore/SparseVector.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSEVECTOR_H
#define EIGEN_SPARSEVECTOR_H
namespace Eigen {
/** \ingroup SparseCore_Module
* \class SparseVector
*
* \brief a sparse vector class
*
* \tparam _Scalar the scalar type, i.e. the type of the coefficients
*
* See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
*
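  * A usage sketch (illustration only):
  * \code
  * SparseVector<double> v(1000);
  * v.reserve(3);
  * v.insert(10)  = 1.0;
  * v.insert(500) = 2.0;
  * v.coeffRef(10) += 4.0;
  * double s = v.sum();   // 7.0
  * \endcode
  *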
* This class can be extended with the help of the plugin mechanism described on the page
* \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEVECTOR_PLUGIN.
*/
namespace internal {
template<typename _Scalar, int _Options, typename _StorageIndex>
struct traits<SparseVector<_Scalar, _Options, _StorageIndex> >
{
typedef _Scalar Scalar;
typedef _StorageIndex StorageIndex;
typedef Sparse StorageKind;
typedef MatrixXpr XprKind;
enum {
IsColVector = (_Options & RowMajorBit) ? 0 : 1,
RowsAtCompileTime = IsColVector ? Dynamic : 1,
ColsAtCompileTime = IsColVector ? 1 : Dynamic,
MaxRowsAtCompileTime = RowsAtCompileTime,
MaxColsAtCompileTime = ColsAtCompileTime,
Flags = _Options | NestByRefBit | LvalueBit | (IsColVector ? 0 : RowMajorBit) | CompressedAccessBit,
SupportedAccessPatterns = InnerRandomAccessPattern
};
};
// Sparse-Vector-Assignment kinds:
enum {
SVA_RuntimeSwitch,
SVA_Inner,
SVA_Outer
};
template< typename Dest, typename Src,
int AssignmentKind = !bool(Src::IsVectorAtCompileTime) ? SVA_RuntimeSwitch
: Src::InnerSizeAtCompileTime==1 ? SVA_Outer
: SVA_Inner>
struct sparse_vector_assign_selector;
}
template<typename _Scalar, int _Options, typename _StorageIndex>
class SparseVector
: public SparseCompressedBase<SparseVector<_Scalar, _Options, _StorageIndex> >
{
typedef SparseCompressedBase<SparseVector> Base;
using Base::convert_index;
public:
EIGEN_SPARSE_PUBLIC_INTERFACE(SparseVector)
EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=)
EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=)
typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
enum { IsColVector = internal::traits<SparseVector>::IsColVector };
enum {
Options = _Options
};
EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; }
EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; }
EIGEN_STRONG_INLINE Index innerSize() const { return m_size; }
EIGEN_STRONG_INLINE Index outerSize() const { return 1; }
EIGEN_STRONG_INLINE const Scalar* valuePtr() const { return m_data.valuePtr(); }
EIGEN_STRONG_INLINE Scalar* valuePtr() { return m_data.valuePtr(); }
EIGEN_STRONG_INLINE const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
EIGEN_STRONG_INLINE StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
inline const StorageIndex* outerIndexPtr() const { return 0; }
inline StorageIndex* outerIndexPtr() { return 0; }
inline const StorageIndex* innerNonZeroPtr() const { return 0; }
inline StorageIndex* innerNonZeroPtr() { return 0; }
/** \internal */
inline Storage& data() { return m_data; }
/** \internal */
inline const Storage& data() const { return m_data; }
inline Scalar coeff(Index row, Index col) const
{
eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
return coeff(IsColVector ? row : col);
}
inline Scalar coeff(Index i) const
{
eigen_assert(i>=0 && i<m_size);
return m_data.at(StorageIndex(i));
}
inline Scalar& coeffRef(Index row, Index col)
{
eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
return coeffRef(IsColVector ? row : col);
}
/** \returns a reference to the coefficient value at given index \a i
     * This operation involves a log(rho*size) binary search. If the coefficient does not
* exist yet, then a sorted insertion into a sequential buffer is performed.
*
* This insertion might be very costly if the number of nonzeros above \a i is large.
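     *
     * When all coefficients are known in increasing index order, insertBack() avoids this
     * cost; a rough sketch (illustration only):
     * \code
     * SparseVector<double> v(100);
     * v.reserve(2);
     * v.insertBack(2) = 1.0;   // indices must be strictly increasing
     * v.insertBack(9) = 3.0;
     * \endcode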
*/
inline Scalar& coeffRef(Index i)
{
eigen_assert(i>=0 && i<m_size);
return m_data.atWithInsertion(StorageIndex(i));
}
public:
typedef typename Base::InnerIterator InnerIterator;
typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
inline void setZero() { m_data.clear(); }
/** \returns the number of non zero coefficients */
inline Index nonZeros() const { return m_data.size(); }
inline void startVec(Index outer)
{
EIGEN_UNUSED_VARIABLE(outer);
eigen_assert(outer==0);
}
inline Scalar& insertBackByOuterInner(Index outer, Index inner)
{
EIGEN_UNUSED_VARIABLE(outer);
eigen_assert(outer==0);
return insertBack(inner);
}
inline Scalar& insertBack(Index i)
{
m_data.append(0, i);
return m_data.value(m_data.size()-1);
}
Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
{
EIGEN_UNUSED_VARIABLE(outer);
eigen_assert(outer==0);
return insertBackUnordered(inner);
}
inline Scalar& insertBackUnordered(Index i)
{
m_data.append(0, i);
return m_data.value(m_data.size()-1);
}
inline Scalar& insert(Index row, Index col)
{
eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
Index inner = IsColVector ? row : col;
Index outer = IsColVector ? col : row;
EIGEN_ONLY_USED_FOR_DEBUG(outer);
eigen_assert(outer==0);
return insert(inner);
}
Scalar& insert(Index i)
{
eigen_assert(i>=0 && i<m_size);
Index startId = 0;
Index p = Index(m_data.size()) - 1;
// TODO smart realloc
m_data.resize(p+2,1);
while ( (p >= startId) && (m_data.index(p) > i) )
{
m_data.index(p+1) = m_data.index(p);
m_data.value(p+1) = m_data.value(p);
--p;
}
m_data.index(p+1) = convert_index(i);
m_data.value(p+1) = 0;
return m_data.value(p+1);
}
    /** Preallocates memory for \a reserveSize nonzero coefficients. */
inline void reserve(Index reserveSize) { m_data.reserve(reserveSize); }
inline void finalize() {}
/** \copydoc SparseMatrix::prune(const Scalar&,const RealScalar&) */
void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
{
m_data.prune(reference,epsilon);
}
/** Resizes the sparse vector to \a rows x \a cols
*
* This method is provided for compatibility with matrices.
* For a column vector, \a cols must be equal to 1.
* For a row vector, \a rows must be equal to 1.
*
* \sa resize(Index)
*/
void resize(Index rows, Index cols)
{
eigen_assert((IsColVector ? cols : rows)==1 && "Outer dimension must equal 1");
resize(IsColVector ? rows : cols);
}
/** Resizes the sparse vector to \a newSize
* This method deletes all entries, thus leaving an empty sparse vector
*
* \sa conservativeResize(), setZero() */
void resize(Index newSize)
{
m_size = newSize;
m_data.clear();
}
/** Resizes the sparse vector to \a newSize, while leaving old values untouched.
*
      * If the size of the vector is decreased, then the storage of the out-of-bounds coefficients is kept and reserved.
* Call .data().squeeze() to free extra memory.
*
* \sa reserve(), setZero()
*/
void conservativeResize(Index newSize)
{
if (newSize < m_size)
{
Index i = 0;
while (i<m_data.size() && m_data.index(i)<newSize) ++i;
m_data.resize(i);
}
m_size = newSize;
}
void resizeNonZeros(Index size) { m_data.resize(size); }
inline SparseVector() : m_size(0) { check_template_parameters(); resize(0); }
explicit inline SparseVector(Index size) : m_size(0) { check_template_parameters(); resize(size); }
inline SparseVector(Index rows, Index cols) : m_size(0) { check_template_parameters(); resize(rows,cols); }
template<typename OtherDerived>
inline SparseVector(const SparseMatrixBase<OtherDerived>& other)
: m_size(0)
{
#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
#endif
check_template_parameters();
*this = other.derived();
}
inline SparseVector(const SparseVector& other)
: Base(other), m_size(0)
{
check_template_parameters();
*this = other.derived();
}
/** Swaps the values of \c *this and \a other.
      * Overloaded for performance: this version performs a \em shallow swap by swapping pointers and attributes only.
* \sa SparseMatrixBase::swap()
*/
inline void swap(SparseVector& other)
{
std::swap(m_size, other.m_size);
m_data.swap(other.m_data);
}
template<int OtherOptions>
inline void swap(SparseMatrix<Scalar,OtherOptions,StorageIndex>& other)
{
eigen_assert(other.outerSize()==1);
std::swap(m_size, other.m_innerSize);
m_data.swap(other.m_data);
}
inline SparseVector& operator=(const SparseVector& other)
{
if (other.isRValue())
{
swap(other.const_cast_derived());
}
else
{
resize(other.size());
m_data = other.m_data;
}
return *this;
}
template<typename OtherDerived>
inline SparseVector& operator=(const SparseMatrixBase<OtherDerived>& other)
{
SparseVector tmp(other.size());
internal::sparse_vector_assign_selector<SparseVector,OtherDerived>::run(tmp,other.derived());
this->swap(tmp);
return *this;
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename Lhs, typename Rhs>
inline SparseVector& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
{
return Base::operator=(product);
}
#endif
friend std::ostream & operator << (std::ostream & s, const SparseVector& m)
{
for (Index i=0; i<m.nonZeros(); ++i)
s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
s << std::endl;
return s;
}
/** Destructor */
inline ~SparseVector() {}
/** Overloaded for performance */
Scalar sum() const;
public:
/** \internal \deprecated use setZero() and reserve() */
EIGEN_DEPRECATED void startFill(Index reserve)
{
setZero();
m_data.reserve(reserve);
}
/** \internal \deprecated use insertBack(Index,Index) */
EIGEN_DEPRECATED Scalar& fill(Index r, Index c)
{
eigen_assert(r==0 || c==0);
return fill(IsColVector ? r : c);
}
/** \internal \deprecated use insertBack(Index) */
EIGEN_DEPRECATED Scalar& fill(Index i)
{
m_data.append(0, i);
return m_data.value(m_data.size()-1);
}
/** \internal \deprecated use insert(Index,Index) */
EIGEN_DEPRECATED Scalar& fillrand(Index r, Index c)
{
eigen_assert(r==0 || c==0);
return fillrand(IsColVector ? r : c);
}
/** \internal \deprecated use insert(Index) */
EIGEN_DEPRECATED Scalar& fillrand(Index i)
{
return insert(i);
}
/** \internal \deprecated use finalize() */
EIGEN_DEPRECATED void endFill() {}
    // These two functions were here in the 3.1 release, so let's keep them in case some code relies on them.
/** \internal \deprecated use data() */
EIGEN_DEPRECATED Storage& _data() { return m_data; }
/** \internal \deprecated use data() */
EIGEN_DEPRECATED const Storage& _data() const { return m_data; }
# ifdef EIGEN_SPARSEVECTOR_PLUGIN
# include EIGEN_SPARSEVECTOR_PLUGIN
# endif
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
EIGEN_STATIC_ASSERT((_Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
}
Storage m_data;
Index m_size;
};
namespace internal {
template<typename _Scalar, int _Options, typename _Index>
struct evaluator<SparseVector<_Scalar,_Options,_Index> >
: evaluator_base<SparseVector<_Scalar,_Options,_Index> >
{
typedef SparseVector<_Scalar,_Options,_Index> SparseVectorType;
typedef evaluator_base<SparseVectorType> Base;
typedef typename SparseVectorType::InnerIterator InnerIterator;
typedef typename SparseVectorType::ReverseInnerIterator ReverseInnerIterator;
enum {
CoeffReadCost = NumTraits<_Scalar>::ReadCost,
Flags = SparseVectorType::Flags
};
evaluator() : Base() {}
explicit evaluator(const SparseVectorType &mat) : m_matrix(&mat)
{
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
inline Index nonZerosEstimate() const {
return m_matrix->nonZeros();
}
operator SparseVectorType&() { return m_matrix->const_cast_derived(); }
operator const SparseVectorType&() const { return *m_matrix; }
const SparseVectorType *m_matrix;
};
template< typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest,Src,SVA_Inner> {
static void run(Dest& dst, const Src& src) {
eigen_internal_assert(src.innerSize()==src.size());
typedef internal::evaluator<Src> SrcEvaluatorType;
SrcEvaluatorType srcEval(src);
for(typename SrcEvaluatorType::InnerIterator it(srcEval, 0); it; ++it)
dst.insert(it.index()) = it.value();
}
};
template< typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest,Src,SVA_Outer> {
static void run(Dest& dst, const Src& src) {
eigen_internal_assert(src.outerSize()==src.size());
typedef internal::evaluator<Src> SrcEvaluatorType;
SrcEvaluatorType srcEval(src);
for(Index i=0; i<src.size(); ++i)
{
typename SrcEvaluatorType::InnerIterator it(srcEval, i);
if(it)
dst.insert(i) = it.value();
}
}
};
template< typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest,Src,SVA_RuntimeSwitch> {
static void run(Dest& dst, const Src& src) {
if(src.outerSize()==1) sparse_vector_assign_selector<Dest,Src,SVA_Inner>::run(dst, src);
else sparse_vector_assign_selector<Dest,Src,SVA_Outer>::run(dst, src);
}
};
}
} // end namespace Eigen
#endif // EIGEN_SPARSEVECTOR_H
| 14,831 | 29.964509 | 120 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/SparseCore/SparseView.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011-2014 Gael Guennebaud <[email protected]>
// Copyright (C) 2010 Daniel Lowengrub <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSEVIEW_H
#define EIGEN_SPARSEVIEW_H
namespace Eigen {
namespace internal {
template<typename MatrixType>
struct traits<SparseView<MatrixType> > : traits<MatrixType>
{
typedef typename MatrixType::StorageIndex StorageIndex;
typedef Sparse StorageKind;
enum {
Flags = int(traits<MatrixType>::Flags) & (RowMajorBit)
};
};
} // end namespace internal
/** \ingroup SparseCore_Module
* \class SparseView
*
* \brief Expression of a dense or sparse matrix with zero or too small values removed
*
* \tparam MatrixType the type of the object of which we are removing the small entries
*
 * This class represents an expression of a given dense or sparse matrix from which
 * entries smaller than \c reference * \c epsilon have been removed.
* It is the return type of MatrixBase::sparseView() and SparseMatrixBase::pruned()
* and most of the time this is the only way it is used.
*
* \sa MatrixBase::sparseView(), SparseMatrixBase::pruned()
*/
template<typename MatrixType>
class SparseView : public SparseMatrixBase<SparseView<MatrixType> >
{
typedef typename MatrixType::Nested MatrixTypeNested;
typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
typedef SparseMatrixBase<SparseView > Base;
public:
EIGEN_SPARSE_PUBLIC_INTERFACE(SparseView)
typedef typename internal::remove_all<MatrixType>::type NestedExpression;
explicit SparseView(const MatrixType& mat, const Scalar& reference = Scalar(0),
const RealScalar &epsilon = NumTraits<Scalar>::dummy_precision())
: m_matrix(mat), m_reference(reference), m_epsilon(epsilon) {}
inline Index rows() const { return m_matrix.rows(); }
inline Index cols() const { return m_matrix.cols(); }
inline Index innerSize() const { return m_matrix.innerSize(); }
inline Index outerSize() const { return m_matrix.outerSize(); }
/** \returns the nested expression */
const typename internal::remove_all<MatrixTypeNested>::type&
nestedExpression() const { return m_matrix; }
Scalar reference() const { return m_reference; }
RealScalar epsilon() const { return m_epsilon; }
protected:
MatrixTypeNested m_matrix;
Scalar m_reference;
RealScalar m_epsilon;
};
namespace internal {
// TODO find a way to unify the two following variants
// This is tricky: implementing an inner iterator on top of an IndexBased evaluator is
// not easy because the evaluators do not expose the sizes of the underlying expression.
template<typename ArgType>
struct unary_evaluator<SparseView<ArgType>, IteratorBased>
: public evaluator_base<SparseView<ArgType> >
{
typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
public:
typedef SparseView<ArgType> XprType;
class InnerIterator : public EvalIterator
{
typedef typename XprType::Scalar Scalar;
public:
EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& sve, Index outer)
: EvalIterator(sve.m_argImpl,outer), m_view(sve.m_view)
{
incrementToNonZero();
}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{
EvalIterator::operator++();
incrementToNonZero();
return *this;
}
using EvalIterator::value;
protected:
const XprType &m_view;
private:
void incrementToNonZero()
{
while((bool(*this)) && internal::isMuchSmallerThan(value(), m_view.reference(), m_view.epsilon()))
{
EvalIterator::operator++();
}
}
};
enum {
CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
Flags = XprType::Flags
};
explicit unary_evaluator(const XprType& xpr) : m_argImpl(xpr.nestedExpression()), m_view(xpr) {}
protected:
evaluator<ArgType> m_argImpl;
const XprType &m_view;
};
template<typename ArgType>
struct unary_evaluator<SparseView<ArgType>, IndexBased>
: public evaluator_base<SparseView<ArgType> >
{
public:
typedef SparseView<ArgType> XprType;
protected:
enum { IsRowMajor = (XprType::Flags&RowMajorBit)==RowMajorBit };
typedef typename XprType::Scalar Scalar;
typedef typename XprType::StorageIndex StorageIndex;
public:
class InnerIterator
{
public:
EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& sve, Index outer)
: m_sve(sve), m_inner(0), m_outer(outer), m_end(sve.m_view.innerSize())
{
incrementToNonZero();
}
EIGEN_STRONG_INLINE InnerIterator& operator++()
{
m_inner++;
incrementToNonZero();
return *this;
}
EIGEN_STRONG_INLINE Scalar value() const
{
return (IsRowMajor) ? m_sve.m_argImpl.coeff(m_outer, m_inner)
: m_sve.m_argImpl.coeff(m_inner, m_outer);
}
EIGEN_STRONG_INLINE StorageIndex index() const { return m_inner; }
inline Index row() const { return IsRowMajor ? m_outer : index(); }
inline Index col() const { return IsRowMajor ? index() : m_outer; }
EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; }
protected:
const unary_evaluator &m_sve;
Index m_inner;
const Index m_outer;
const Index m_end;
private:
void incrementToNonZero()
{
while((bool(*this)) && internal::isMuchSmallerThan(value(), m_sve.m_view.reference(), m_sve.m_view.epsilon()))
{
m_inner++;
}
}
};
enum {
CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
Flags = XprType::Flags
};
explicit unary_evaluator(const XprType& xpr) : m_argImpl(xpr.nestedExpression()), m_view(xpr) {}
protected:
evaluator<ArgType> m_argImpl;
const XprType &m_view;
};
} // end namespace internal
/** \ingroup SparseCore_Module
*
* \returns a sparse expression of the dense expression \c *this with values smaller than
* \a reference * \a epsilon removed.
*
* This method is typically used when prototyping to convert a quickly assembled dense Matrix \c D to a SparseMatrix \c S:
* \code
* MatrixXd D(n,m);
* SparseMatrix<double> S;
* S = D.sparseView(); // suppress numerical zeros (exact)
* S = D.sparseView(reference);
* S = D.sparseView(reference,epsilon);
* \endcode
* where \a reference is a meaningful non zero reference value,
* and \a epsilon is a tolerance factor defaulting to NumTraits<Scalar>::dummy_precision().
*
* \sa SparseMatrixBase::pruned(), class SparseView */
template<typename Derived>
const SparseView<Derived> MatrixBase<Derived>::sparseView(const Scalar& reference,
const typename NumTraits<Scalar>::Real& epsilon) const
{
return SparseView<Derived>(derived(), reference, epsilon);
}
/** \returns an expression of \c *this with values smaller than
* \a reference * \a epsilon removed.
*
* This method is typically used in conjunction with the product of two sparse matrices
* to automatically prune the smallest values as follows:
* \code
* C = (A*B).pruned(); // suppress numerical zeros (exact)
* C = (A*B).pruned(ref);
* C = (A*B).pruned(ref,epsilon);
* \endcode
* where \c ref is a meaningful non zero reference value.
* */
template<typename Derived>
const SparseView<Derived>
SparseMatrixBase<Derived>::pruned(const Scalar& reference,
const RealScalar& epsilon) const
{
return SparseView<Derived>(derived(), reference, epsilon);
}
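/** Usage sketch (illustrative, not part of Eigen's shipped documentation): pruning
  * the result of a sparse product; the sizes, reference value and tolerance below
  * are arbitrary.
  * \code
  * Eigen::SparseMatrix<double> A(100,100), B(100,100), C;
  * A.setIdentity();
  * B.setIdentity();
  * C = (A*B).pruned(1.0, 1e-12); // drop entries much smaller than 1.0 (tolerance 1e-12)
  * \endcode
  */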
} // end namespace Eigen
#endif
| 8,110 | 30.933071 | 123 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/SparseLU/SparseLU_Structs.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
/*
* NOTE: This file comes from a partly modified version of files slu_[s,d,c,z]defs.h
* -- SuperLU routine (version 4.1) --
* Univ. of California Berkeley, Xerox Palo Alto Research Center,
* and Lawrence Berkeley National Lab.
* November, 2010
*
* Global data structures used in LU factorization -
*
* nsuper: #supernodes = nsuper + 1, numbered [0, nsuper].
* (xsup,supno): supno[i] is the supernode no to which i belongs;
* xsup(s) points to the beginning of the s-th supernode.
* e.g. supno 0 1 2 2 3 3 3 4 4 4 4 4 (n=12)
* xsup 0 1 2 4 7 12
* Note: dfs will be performed on supernode rep. relative to the new
* row pivoting ordering
*
* (xlsub,lsub): lsub[*] contains the compressed subscript of
* rectangular supernodes; xlsub[j] points to the starting
* location of the j-th column in lsub[*]. Note that xlsub
* is indexed by column.
* Storage: original row subscripts
*
* During the course of sparse LU factorization, we also use
* (xlsub,lsub) for the purpose of symmetric pruning. For each
* supernode {s,s+1,...,t=s+r} with first column s and last
* column t, the subscript set
* lsub[j], j=xlsub[s], .., xlsub[s+1]-1
* is the structure of column s (i.e. structure of this supernode).
* It is used for the storage of numerical values.
* Furthermore,
* lsub[j], j=xlsub[t], .., xlsub[t+1]-1
* is the structure of the last column t of this supernode.
* It is for the purpose of symmetric pruning. Therefore, the
* structural subscripts can be rearranged without making physical
* interchanges among the numerical values.
*
* However, if the supernode has only one column, then we
* only keep one set of subscripts. For any subscript interchange
* performed, similar interchange must be done on the numerical
* values.
*
* The last column structures (for pruning) will be removed
 * after the numerical LU factorization phase.
*
* (xlusup,lusup): lusup[*] contains the numerical values of the
* rectangular supernodes; xlusup[j] points to the starting
* location of the j-th column in storage vector lusup[*]
* Note: xlusup is indexed by column.
* Each rectangular supernode is stored by column-major
* scheme, consistent with Fortran 2-dim array storage.
*
* (xusub,ucol,usub): ucol[*] stores the numerical values of
* U-columns outside the rectangular supernodes. The row
* subscript of nonzero ucol[k] is stored in usub[k].
* xusub[i] points to the starting location of column i in ucol.
* Storage: new row subscripts; that is subscripts of PA.
*/
#ifndef EIGEN_LU_STRUCTS
#define EIGEN_LU_STRUCTS
namespace Eigen {
namespace internal {
typedef enum {LUSUP, UCOL, LSUB, USUB, LLVL, ULVL} MemType;
template <typename IndexVector, typename ScalarVector>
struct LU_GlobalLU_t {
typedef typename IndexVector::Scalar StorageIndex;
IndexVector xsup; //First supernode column ... xsup(s) points to the beginning of the s-th supernode
IndexVector supno; // Supernode number corresponding to this column (column to supernode mapping)
ScalarVector lusup; // nonzero values of L ordered by columns
IndexVector lsub; // Compressed row indices of L rectangular supernodes.
IndexVector xlusup; // pointers to the beginning of each column in lusup
IndexVector xlsub; // pointers to the beginning of each column in lsub
Index nzlmax; // Current max size of lsub
Index nzlumax; // Current max size of lusup
ScalarVector ucol; // nonzero values of U ordered by columns
IndexVector usub; // row indices of U columns in ucol
IndexVector xusub; // Pointers to the beginning of each column of U in ucol
Index nzumax; // Current max size of ucol
Index n; // Number of columns in the matrix
Index num_expansions;
};
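/** Usage sketch (illustrative only): how the index arrays above are traversed,
  * mirroring the loops of SparseLU_Utils.h. For a supernode s, its columns are
  * xsup(s) .. xsup(s+1)-1 and the row indices of its first column start at
  * xlsub(xsup(s)).
  * \code
  * template <typename IndexVector, typename ScalarVector>
  * void print_supernode_sizes(const LU_GlobalLU_t<IndexVector,ScalarVector>& glu, Index nsuper)
  * {
  *   for (Index s = 0; s <= nsuper; ++s)
  *   {
  *     Index fsupc = glu.xsup(s);                           // first column of supernode s
  *     Index ncols = glu.xsup(s+1) - fsupc;                 // number of columns
  *     Index nrows = glu.xlsub(fsupc+1) - glu.xlsub(fsupc); // rows of the rectangular block
  *     std::cout << "supernode " << s << ": " << nrows << " x " << ncols << "\n";
  *   }
  * }
  * \endcode
  */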
// Values to set for performance
struct perfvalues {
Index panel_size; // a panel consists of at most <panel_size> consecutive columns
Index relax; // To control degree of relaxing supernodes. If the number of nodes (columns)
// in a subtree of the elimination tree is less than relax, this subtree is considered
// as one supernode regardless of the row structures of those columns
Index maxsuper; // The maximum size for a supernode in complete LU
Index rowblk; // The minimum row dimension for 2-D blocking to be used;
Index colblk; // The minimum column dimension for 2-D blocking to be used;
Index fillfactor; // The estimated fills factors for L and U, compared with A
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_LU_STRUCTS
| 4,972 | 43.801802 | 103 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/SparseLU/SparseLU_Utils.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSELU_UTILS_H
#define EIGEN_SPARSELU_UTILS_H
namespace Eigen {
namespace internal {
/**
* \brief Count Nonzero elements in the factors
*/
template <typename Scalar, typename StorageIndex>
void SparseLUImpl<Scalar,StorageIndex>::countnz(const Index n, Index& nnzL, Index& nnzU, GlobalLU_t& glu)
{
nnzL = 0;
nnzU = (glu.xusub)(n);
Index nsuper = (glu.supno)(n);
Index jlen;
Index i, j, fsupc;
if (n <= 0 ) return;
// For each supernode
for (i = 0; i <= nsuper; i++)
{
fsupc = glu.xsup(i);
jlen = glu.xlsub(fsupc+1) - glu.xlsub(fsupc);
for (j = fsupc; j < glu.xsup(i+1); j++)
{
nnzL += jlen;
nnzU += j - fsupc + 1;
jlen--;
}
}
}
/**
* \brief Fix up the data storage lsub for L-subscripts.
*
* It removes the subscripts sets for structural pruning,
* and applies permutation to the remaining subscripts
*
*/
template <typename Scalar, typename StorageIndex>
void SparseLUImpl<Scalar,StorageIndex>::fixupL(const Index n, const IndexVector& perm_r, GlobalLU_t& glu)
{
Index fsupc, i, j, k, jstart;
StorageIndex nextl = 0;
Index nsuper = (glu.supno)(n);
// For each supernode
for (i = 0; i <= nsuper; i++)
{
fsupc = glu.xsup(i);
jstart = glu.xlsub(fsupc);
glu.xlsub(fsupc) = nextl;
for (j = jstart; j < glu.xlsub(fsupc + 1); j++)
{
glu.lsub(nextl) = perm_r(glu.lsub(j)); // Now indexed into P*A
nextl++;
}
for (k = fsupc+1; k < glu.xsup(i+1); k++)
glu.xlsub(k) = nextl; // other columns in supernode i
}
glu.xlsub(n) = nextl;
}
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSELU_UTILS_H
| 2,047 | 24.283951 | 105 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/SparseLU/SparseLU_column_bmod.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]>
// Copyright (C) 2012 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
/*
* NOTE: This file is the modified version of xcolumn_bmod.c file in SuperLU
* -- SuperLU routine (version 3.0) --
* Univ. of California Berkeley, Xerox Palo Alto Research Center,
* and Lawrence Berkeley National Lab.
* October 15, 2003
*
* Copyright (c) 1994 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
* EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program for any
* purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is
* granted, provided the above notices are retained, and a notice that
* the code was modified is included with the above copyright notice.
*/
#ifndef SPARSELU_COLUMN_BMOD_H
#define SPARSELU_COLUMN_BMOD_H
namespace Eigen {
namespace internal {
/**
* \brief Performs numeric block updates (sup-col) in topological order
*
* \param jcol current column to update
* \param nseg Number of segments in the U part
* \param dense Store the full representation of the column
* \param tempv working array
* \param segrep segment representative ...
* \param repfnz ??? First nonzero column in each row ??? ...
* \param fpanelc First column in the current panel
* \param glu Global LU data.
* \return 0 - successful return
* > 0 - number of bytes allocated when run out of space
*
*/
template <typename Scalar, typename StorageIndex>
Index SparseLUImpl<Scalar,StorageIndex>::column_bmod(const Index jcol, const Index nseg, BlockScalarVector dense, ScalarVector& tempv,
BlockIndexVector segrep, BlockIndexVector repfnz, Index fpanelc, GlobalLU_t& glu)
{
Index jsupno, k, ksub, krep, ksupno;
Index lptr, nrow, isub, irow, nextlu, new_next, ufirst;
Index fsupc, nsupc, nsupr, luptr, kfnz, no_zeros;
/* krep = representative of current k-th supernode
* fsupc = first supernodal column
* nsupc = number of columns in a supernode
* nsupr = number of rows in a supernode
* luptr = location of supernodal LU-block in storage
* kfnz = first nonz in the k-th supernodal segment
   *  no_zeros = number of leading zeros in a supernodal U-segment
*/
jsupno = glu.supno(jcol);
// For each nonzero supernode segment of U[*,j] in topological order
k = nseg - 1;
Index d_fsupc; // distance between the first column of the current panel and the
// first column of the current snode
Index fst_col; // First column within small LU update
Index segsize;
for (ksub = 0; ksub < nseg; ksub++)
{
krep = segrep(k); k--;
ksupno = glu.supno(krep);
if (jsupno != ksupno )
{
// outside the rectangular supernode
fsupc = glu.xsup(ksupno);
fst_col = (std::max)(fsupc, fpanelc);
// Distance from the current supernode to the current panel;
// d_fsupc = 0 if fsupc > fpanelc
d_fsupc = fst_col - fsupc;
luptr = glu.xlusup(fst_col) + d_fsupc;
lptr = glu.xlsub(fsupc) + d_fsupc;
kfnz = repfnz(krep);
kfnz = (std::max)(kfnz, fpanelc);
segsize = krep - kfnz + 1;
nsupc = krep - fst_col + 1;
nsupr = glu.xlsub(fsupc+1) - glu.xlsub(fsupc);
nrow = nsupr - d_fsupc - nsupc;
Index lda = glu.xlusup(fst_col+1) - glu.xlusup(fst_col);
// Perform a triangular solver and block update,
// then scatter the result of sup-col update to dense
no_zeros = kfnz - fst_col;
if(segsize==1)
LU_kernel_bmod<1>::run(segsize, dense, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);
else
LU_kernel_bmod<Dynamic>::run(segsize, dense, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);
} // end if jsupno
} // end for each segment
// Process the supernodal portion of L\U[*,j]
nextlu = glu.xlusup(jcol);
fsupc = glu.xsup(jsupno);
// copy the SPA dense into L\U[*,j]
Index mem;
new_next = nextlu + glu.xlsub(fsupc + 1) - glu.xlsub(fsupc);
Index offset = internal::first_multiple<Index>(new_next, internal::packet_traits<Scalar>::size) - new_next;
if(offset)
new_next += offset;
while (new_next > glu.nzlumax )
{
mem = memXpand<ScalarVector>(glu.lusup, glu.nzlumax, nextlu, LUSUP, glu.num_expansions);
if (mem) return mem;
}
for (isub = glu.xlsub(fsupc); isub < glu.xlsub(fsupc+1); isub++)
{
irow = glu.lsub(isub);
glu.lusup(nextlu) = dense(irow);
dense(irow) = Scalar(0.0);
++nextlu;
}
if(offset)
{
glu.lusup.segment(nextlu,offset).setZero();
nextlu += offset;
}
glu.xlusup(jcol + 1) = StorageIndex(nextlu); // close L\U(*,jcol);
/* For more updates within the panel (also within the current supernode),
* should start from the first column of the panel, or the first column
* of the supernode, whichever is bigger. There are two cases:
* 1) fsupc < fpanelc, then fst_col <-- fpanelc
* 2) fsupc >= fpanelc, then fst_col <-- fsupc
*/
fst_col = (std::max)(fsupc, fpanelc);
if (fst_col < jcol)
{
// Distance between the current supernode and the current panel
// d_fsupc = 0 if fsupc >= fpanelc
d_fsupc = fst_col - fsupc;
lptr = glu.xlsub(fsupc) + d_fsupc;
luptr = glu.xlusup(fst_col) + d_fsupc;
nsupr = glu.xlsub(fsupc+1) - glu.xlsub(fsupc); // leading dimension
nsupc = jcol - fst_col; // excluding jcol
nrow = nsupr - d_fsupc - nsupc;
// points to the beginning of jcol in snode L\U(jsupno)
ufirst = glu.xlusup(jcol) + d_fsupc;
Index lda = glu.xlusup(jcol+1) - glu.xlusup(jcol);
MappedMatrixBlock A( &(glu.lusup.data()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
VectorBlock<ScalarVector> u(glu.lusup, ufirst, nsupc);
u = A.template triangularView<UnitLower>().solve(u);
new (&A) MappedMatrixBlock ( &(glu.lusup.data()[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) );
VectorBlock<ScalarVector> l(glu.lusup, ufirst+nsupc, nrow);
l.noalias() -= A * u;
} // End if fst_col
return 0;
}
} // end namespace internal
} // end namespace Eigen
#endif // SPARSELU_COLUMN_BMOD_H
| 6,710 | 35.873626 | 134 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/SparseLU/SparseLU_gemm_kernel.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSELU_GEMM_KERNEL_H
#define EIGEN_SPARSELU_GEMM_KERNEL_H
namespace Eigen {
namespace internal {
/** \internal
* A general matrix-matrix product kernel optimized for the SparseLU factorization.
* - A, B, and C must be column major
* - lda and ldc must be multiples of the respective packet size
* - C must have the same alignment as A
*/
template<typename Scalar>
EIGEN_DONT_INLINE
void sparselu_gemm(Index m, Index n, Index d, const Scalar* A, Index lda, const Scalar* B, Index ldb, Scalar* C, Index ldc)
{
using namespace Eigen::internal;
typedef typename packet_traits<Scalar>::type Packet;
enum {
NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
PacketSize = packet_traits<Scalar>::size,
PM = 8, // peeling in M
RN = 2, // register blocking
RK = NumberOfRegisters>=16 ? 4 : 2, // register blocking
BM = 4096/sizeof(Scalar), // number of rows of A-C per chunk
SM = PM*PacketSize // step along M
};
Index d_end = (d/RK)*RK; // number of columns of A (rows of B) suitable for full register blocking
Index n_end = (n/RN)*RN; // number of columns of B-C suitable for processing RN columns at once
Index i0 = internal::first_default_aligned(A,m);
eigen_internal_assert(((lda%PacketSize)==0) && ((ldc%PacketSize)==0) && (i0==internal::first_default_aligned(C,m)));
// handle the non aligned rows of A and C without any optimization:
for(Index i=0; i<i0; ++i)
{
for(Index j=0; j<n; ++j)
{
Scalar c = C[i+j*ldc];
for(Index k=0; k<d; ++k)
c += B[k+j*ldb] * A[i+k*lda];
C[i+j*ldc] = c;
}
}
// process the remaining rows per chunk of BM rows
for(Index ib=i0; ib<m; ib+=BM)
{
Index actual_b = std::min<Index>(BM, m-ib); // actual number of rows
Index actual_b_end1 = (actual_b/SM)*SM; // actual number of rows suitable for peeling
Index actual_b_end2 = (actual_b/PacketSize)*PacketSize; // actual number of rows suitable for vectorization
// Let's process two columns of B-C at once
for(Index j=0; j<n_end; j+=RN)
{
const Scalar* Bc0 = B+(j+0)*ldb;
const Scalar* Bc1 = B+(j+1)*ldb;
for(Index k=0; k<d_end; k+=RK)
{
// load and expand a RN x RK block of B
Packet b00, b10, b20, b30, b01, b11, b21, b31;
{ b00 = pset1<Packet>(Bc0[0]); }
{ b10 = pset1<Packet>(Bc0[1]); }
if(RK==4) { b20 = pset1<Packet>(Bc0[2]); }
if(RK==4) { b30 = pset1<Packet>(Bc0[3]); }
{ b01 = pset1<Packet>(Bc1[0]); }
{ b11 = pset1<Packet>(Bc1[1]); }
if(RK==4) { b21 = pset1<Packet>(Bc1[2]); }
if(RK==4) { b31 = pset1<Packet>(Bc1[3]); }
Packet a0, a1, a2, a3, c0, c1, t0, t1;
const Scalar* A0 = A+ib+(k+0)*lda;
const Scalar* A1 = A+ib+(k+1)*lda;
const Scalar* A2 = A+ib+(k+2)*lda;
const Scalar* A3 = A+ib+(k+3)*lda;
Scalar* C0 = C+ib+(j+0)*ldc;
Scalar* C1 = C+ib+(j+1)*ldc;
a0 = pload<Packet>(A0);
a1 = pload<Packet>(A1);
if(RK==4)
{
a2 = pload<Packet>(A2);
a3 = pload<Packet>(A3);
}
else
{
// workaround "may be used uninitialized in this function" warning
a2 = a3 = a0;
}
#define KMADD(c, a, b, tmp) {tmp = b; tmp = pmul(a,tmp); c = padd(c,tmp);}
#define WORK(I) \
c0 = pload<Packet>(C0+i+(I)*PacketSize); \
c1 = pload<Packet>(C1+i+(I)*PacketSize); \
KMADD(c0, a0, b00, t0) \
KMADD(c1, a0, b01, t1) \
a0 = pload<Packet>(A0+i+(I+1)*PacketSize); \
KMADD(c0, a1, b10, t0) \
KMADD(c1, a1, b11, t1) \
a1 = pload<Packet>(A1+i+(I+1)*PacketSize); \
if(RK==4){ KMADD(c0, a2, b20, t0) }\
if(RK==4){ KMADD(c1, a2, b21, t1) }\
if(RK==4){ a2 = pload<Packet>(A2+i+(I+1)*PacketSize); }\
if(RK==4){ KMADD(c0, a3, b30, t0) }\
if(RK==4){ KMADD(c1, a3, b31, t1) }\
if(RK==4){ a3 = pload<Packet>(A3+i+(I+1)*PacketSize); }\
pstore(C0+i+(I)*PacketSize, c0); \
pstore(C1+i+(I)*PacketSize, c1)
// process rows of A' - C' with aggressive vectorization and peeling
for(Index i=0; i<actual_b_end1; i+=PacketSize*8)
{
EIGEN_ASM_COMMENT("SPARSELU_GEMML_KERNEL1");
prefetch((A0+i+(5)*PacketSize));
prefetch((A1+i+(5)*PacketSize));
if(RK==4) prefetch((A2+i+(5)*PacketSize));
if(RK==4) prefetch((A3+i+(5)*PacketSize));
WORK(0);
WORK(1);
WORK(2);
WORK(3);
WORK(4);
WORK(5);
WORK(6);
WORK(7);
}
// process the remaining rows with vectorization only
for(Index i=actual_b_end1; i<actual_b_end2; i+=PacketSize)
{
WORK(0);
}
#undef WORK
// process the remaining rows without vectorization
for(Index i=actual_b_end2; i<actual_b; ++i)
{
if(RK==4)
{
C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1]+A2[i]*Bc0[2]+A3[i]*Bc0[3];
C1[i] += A0[i]*Bc1[0]+A1[i]*Bc1[1]+A2[i]*Bc1[2]+A3[i]*Bc1[3];
}
else
{
C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1];
C1[i] += A0[i]*Bc1[0]+A1[i]*Bc1[1];
}
}
Bc0 += RK;
Bc1 += RK;
} // peeled loop on k
} // peeled loop on the columns j
// process the last column (we now perform a matrix-vector product)
if((n-n_end)>0)
{
const Scalar* Bc0 = B+(n-1)*ldb;
for(Index k=0; k<d_end; k+=RK)
{
// load and expand a 1 x RK block of B
Packet b00, b10, b20, b30;
b00 = pset1<Packet>(Bc0[0]);
b10 = pset1<Packet>(Bc0[1]);
if(RK==4) b20 = pset1<Packet>(Bc0[2]);
if(RK==4) b30 = pset1<Packet>(Bc0[3]);
Packet a0, a1, a2, a3, c0, t0/*, t1*/;
const Scalar* A0 = A+ib+(k+0)*lda;
const Scalar* A1 = A+ib+(k+1)*lda;
const Scalar* A2 = A+ib+(k+2)*lda;
const Scalar* A3 = A+ib+(k+3)*lda;
Scalar* C0 = C+ib+(n_end)*ldc;
a0 = pload<Packet>(A0);
a1 = pload<Packet>(A1);
if(RK==4)
{
a2 = pload<Packet>(A2);
a3 = pload<Packet>(A3);
}
else
{
// workaround "may be used uninitialized in this function" warning
a2 = a3 = a0;
}
#define WORK(I) \
c0 = pload<Packet>(C0+i+(I)*PacketSize); \
KMADD(c0, a0, b00, t0) \
a0 = pload<Packet>(A0+i+(I+1)*PacketSize); \
KMADD(c0, a1, b10, t0) \
a1 = pload<Packet>(A1+i+(I+1)*PacketSize); \
if(RK==4){ KMADD(c0, a2, b20, t0) }\
if(RK==4){ a2 = pload<Packet>(A2+i+(I+1)*PacketSize); }\
if(RK==4){ KMADD(c0, a3, b30, t0) }\
if(RK==4){ a3 = pload<Packet>(A3+i+(I+1)*PacketSize); }\
pstore(C0+i+(I)*PacketSize, c0);
      // aggressive vectorization and peeling
for(Index i=0; i<actual_b_end1; i+=PacketSize*8)
{
EIGEN_ASM_COMMENT("SPARSELU_GEMML_KERNEL2");
WORK(0);
WORK(1);
WORK(2);
WORK(3);
WORK(4);
WORK(5);
WORK(6);
WORK(7);
}
// vectorization only
for(Index i=actual_b_end1; i<actual_b_end2; i+=PacketSize)
{
WORK(0);
}
// remaining scalars
for(Index i=actual_b_end2; i<actual_b; ++i)
{
if(RK==4)
C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1]+A2[i]*Bc0[2]+A3[i]*Bc0[3];
else
C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1];
}
Bc0 += RK;
#undef WORK
}
}
// process the last columns of A, corresponding to the last rows of B
Index rd = d-d_end;
if(rd>0)
{
for(Index j=0; j<n; ++j)
{
enum {
Alignment = PacketSize>1 ? Aligned : 0
};
typedef Map<Matrix<Scalar,Dynamic,1>, Alignment > MapVector;
typedef Map<const Matrix<Scalar,Dynamic,1>, Alignment > ConstMapVector;
if(rd==1) MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b);
else if(rd==2) MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b)
+ B[1+d_end+j*ldb] * ConstMapVector(A+(d_end+1)*lda+ib, actual_b);
else MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b)
+ B[1+d_end+j*ldb] * ConstMapVector(A+(d_end+1)*lda+ib, actual_b)
+ B[2+d_end+j*ldb] * ConstMapVector(A+(d_end+2)*lda+ib, actual_b);
}
}
} // blocking on the rows of A and C
}
#undef KMADD
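/** Usage sketch (illustrative, not part of Eigen): calling the kernel on padded,
  * column-major buffers. The leading dimensions and the use of Eigen::aligned_allocator
  * (so that A and C share the same alignment) are assumptions made to satisfy the
  * requirements stated above.
  * \code
  * typedef std::vector<double, Eigen::aligned_allocator<double> > AlignedVec;
  * Eigen::Index m = 64, n = 8, d = 4, lda = 64, ldb = 4, ldc = 64;
  * AlignedVec A(lda*d, 1.0), B(ldb*n, 1.0), C(ldc*n, 0.0);
  * Eigen::internal::sparselu_gemm<double>(m, n, d, A.data(), lda, B.data(), ldb, C.data(), ldc);
  * // C(0:m-1, j) has been incremented by A(0:m-1, 0:d-1) * B(0:d-1, j)
  * \endcode
  */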
} // namespace internal
} // namespace Eigen
#endif // EIGEN_SPARSELU_GEMM_KERNEL_H
| 10,216 | 35.359431 | 123 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/StlSupport/StdVector.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <[email protected]>
// Copyright (C) 2009 Hauke Heibel <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_STDVECTOR_H
#define EIGEN_STDVECTOR_H
#include "details.h"
/**
* This section contains a convenience MACRO which allows an easy specialization of
* std::vector such that for data types with alignment issues the correct allocator
* is used automatically.
*/
#define EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(...) \
namespace std \
{ \
template<> \
class vector<__VA_ARGS__, std::allocator<__VA_ARGS__> > \
: public vector<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \
{ \
typedef vector<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > vector_base; \
public: \
typedef __VA_ARGS__ value_type; \
typedef vector_base::allocator_type allocator_type; \
typedef vector_base::size_type size_type; \
typedef vector_base::iterator iterator; \
explicit vector(const allocator_type& a = allocator_type()) : vector_base(a) {} \
template<typename InputIterator> \
vector(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : vector_base(first, last, a) {} \
vector(const vector& c) : vector_base(c) {} \
explicit vector(size_type num, const value_type& val = value_type()) : vector_base(num, val) {} \
vector(iterator start, iterator end) : vector_base(start, end) {} \
vector& operator=(const vector& x) { \
vector_base::operator=(x); \
return *this; \
} \
}; \
}
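/** Usage sketch (illustrative): for a fixed-size vectorizable type, either invoke
  * the macro above once so that plain std::vector<Type> picks the aligned allocator,
  * \code
  * EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Eigen::Vector4f)
  * std::vector<Eigen::Vector4f> v(100);
  * \endcode
  * or spell the aligned allocator out explicitly and skip the macro:
  * \code
  * std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> > w(100);
  * \endcode
  */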
// Don't specialize if containers are implemented according to C++11
#if !EIGEN_HAS_CXX11_CONTAINERS
namespace std {
#define EIGEN_STD_VECTOR_SPECIALIZATION_BODY \
public: \
typedef T value_type; \
typedef typename vector_base::allocator_type allocator_type; \
typedef typename vector_base::size_type size_type; \
typedef typename vector_base::iterator iterator; \
typedef typename vector_base::const_iterator const_iterator; \
explicit vector(const allocator_type& a = allocator_type()) : vector_base(a) {} \
template<typename InputIterator> \
vector(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) \
: vector_base(first, last, a) {} \
vector(const vector& c) : vector_base(c) {} \
explicit vector(size_type num, const value_type& val = value_type()) : vector_base(num, val) {} \
vector(iterator start, iterator end) : vector_base(start, end) {} \
vector& operator=(const vector& x) { \
vector_base::operator=(x); \
return *this; \
}
template<typename T>
class vector<T,EIGEN_ALIGNED_ALLOCATOR<T> >
: public vector<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T),
Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> >
{
typedef vector<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T),
Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> > vector_base;
EIGEN_STD_VECTOR_SPECIALIZATION_BODY
void resize(size_type new_size)
{ resize(new_size, T()); }
#if defined(_VECTOR_)
// workaround MSVC std::vector implementation
void resize(size_type new_size, const value_type& x)
{
if (vector_base::size() < new_size)
vector_base::_Insert_n(vector_base::end(), new_size - vector_base::size(), x);
else if (new_size < vector_base::size())
vector_base::erase(vector_base::begin() + new_size, vector_base::end());
}
void push_back(const value_type& x)
{ vector_base::push_back(x); }
using vector_base::insert;
iterator insert(const_iterator position, const value_type& x)
{ return vector_base::insert(position,x); }
void insert(const_iterator position, size_type new_size, const value_type& x)
{ vector_base::insert(position, new_size, x); }
#elif defined(_GLIBCXX_VECTOR) && (!(EIGEN_GNUC_AT_LEAST(4,1)))
/* Note that before gcc-4.1 we already have: std::vector::resize(size_type,const T&).
* However, this specialization is still needed to make the above EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION trick to work. */
void resize(size_type new_size, const value_type& x)
{
vector_base::resize(new_size,x);
}
#elif defined(_GLIBCXX_VECTOR) && EIGEN_GNUC_AT_LEAST(4,2)
// workaround GCC std::vector implementation
void resize(size_type new_size, const value_type& x)
{
if (new_size < vector_base::size())
vector_base::_M_erase_at_end(this->_M_impl._M_start + new_size);
else
vector_base::insert(vector_base::end(), new_size - vector_base::size(), x);
}
#else
// either GCC 4.1 or non-GCC
// default implementation which should always work.
void resize(size_type new_size, const value_type& x)
{
if (new_size < vector_base::size())
vector_base::erase(vector_base::begin() + new_size, vector_base::end());
else if (new_size > vector_base::size())
vector_base::insert(vector_base::end(), new_size - vector_base::size(), x);
}
#endif
};
}
#endif // !EIGEN_HAS_CXX11_CONTAINERS
#endif // EIGEN_STDVECTOR_H
| 5,330 | 39.386364 | 130 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/StlSupport/details.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <[email protected]>
// Copyright (C) 2009 Hauke Heibel <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_STL_DETAILS_H
#define EIGEN_STL_DETAILS_H
#ifndef EIGEN_ALIGNED_ALLOCATOR
#define EIGEN_ALIGNED_ALLOCATOR Eigen::aligned_allocator
#endif
namespace Eigen {
// This one is needed to prevent reimplementing the whole std::vector.
template <class T>
class aligned_allocator_indirection : public EIGEN_ALIGNED_ALLOCATOR<T>
{
public:
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef const T& const_reference;
typedef T value_type;
template<class U>
struct rebind
{
typedef aligned_allocator_indirection<U> other;
};
aligned_allocator_indirection() {}
aligned_allocator_indirection(const aligned_allocator_indirection& ) : EIGEN_ALIGNED_ALLOCATOR<T>() {}
aligned_allocator_indirection(const EIGEN_ALIGNED_ALLOCATOR<T>& ) {}
template<class U>
aligned_allocator_indirection(const aligned_allocator_indirection<U>& ) {}
template<class U>
aligned_allocator_indirection(const EIGEN_ALIGNED_ALLOCATOR<U>& ) {}
~aligned_allocator_indirection() {}
};
#if EIGEN_COMP_MSVC
// sometimes, MSVC detects, at compile time, that the argument x
  // in std::vector::resize(size_t s,T x) won't be aligned and generates an error
// even if this function is never called. Whence this little wrapper.
#define EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T) \
typename Eigen::internal::conditional< \
Eigen::internal::is_arithmetic<T>::value, \
T, \
Eigen::internal::workaround_msvc_stl_support<T> \
>::type
namespace internal {
template<typename T> struct workaround_msvc_stl_support : public T
{
inline workaround_msvc_stl_support() : T() {}
inline workaround_msvc_stl_support(const T& other) : T(other) {}
inline operator T& () { return *static_cast<T*>(this); }
inline operator const T& () const { return *static_cast<const T*>(this); }
template<typename OtherT>
inline T& operator=(const OtherT& other)
{ T::operator=(other); return *this; }
inline workaround_msvc_stl_support& operator=(const workaround_msvc_stl_support& other)
{ T::operator=(other); return *this; }
};
}
#else
#define EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T) T
#endif
}
#endif // EIGEN_STL_DETAILS_H
| 2,809 | 32.058824 | 106 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/misc/Image.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_MISC_IMAGE_H
#define EIGEN_MISC_IMAGE_H
namespace Eigen {
namespace internal {
/** \class image_retval_base
*
*/
template<typename DecompositionType>
struct traits<image_retval_base<DecompositionType> >
{
typedef typename DecompositionType::MatrixType MatrixType;
typedef Matrix<
typename MatrixType::Scalar,
MatrixType::RowsAtCompileTime, // the image is a subspace of the destination space, whose
// dimension is the number of rows of the original matrix
Dynamic, // we don't know at compile time the dimension of the image (the rank)
MatrixType::Options,
MatrixType::MaxRowsAtCompileTime, // the image matrix will consist of columns from the original matrix,
MatrixType::MaxColsAtCompileTime // so it has the same number of rows and at most as many columns.
> ReturnType;
};
template<typename _DecompositionType> struct image_retval_base
: public ReturnByValue<image_retval_base<_DecompositionType> >
{
typedef _DecompositionType DecompositionType;
typedef typename DecompositionType::MatrixType MatrixType;
typedef ReturnByValue<image_retval_base> Base;
image_retval_base(const DecompositionType& dec, const MatrixType& originalMatrix)
: m_dec(dec), m_rank(dec.rank()),
m_cols(m_rank == 0 ? 1 : m_rank),
m_originalMatrix(originalMatrix)
{}
inline Index rows() const { return m_dec.rows(); }
inline Index cols() const { return m_cols; }
inline Index rank() const { return m_rank; }
inline const DecompositionType& dec() const { return m_dec; }
inline const MatrixType& originalMatrix() const { return m_originalMatrix; }
template<typename Dest> inline void evalTo(Dest& dst) const
{
static_cast<const image_retval<DecompositionType>*>(this)->evalTo(dst);
}
protected:
const DecompositionType& m_dec;
Index m_rank, m_cols;
const MatrixType& m_originalMatrix;
};
} // end namespace internal
#define EIGEN_MAKE_IMAGE_HELPERS(DecompositionType) \
typedef typename DecompositionType::MatrixType MatrixType; \
typedef typename MatrixType::Scalar Scalar; \
typedef typename MatrixType::RealScalar RealScalar; \
typedef Eigen::internal::image_retval_base<DecompositionType> Base; \
using Base::dec; \
using Base::originalMatrix; \
using Base::rank; \
using Base::rows; \
using Base::cols; \
image_retval(const DecompositionType& dec, const MatrixType& originalMatrix) \
: Base(dec, originalMatrix) {}
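/** Usage sketch (illustrative): image_retval_base backs the image() method of
  * decompositions such as FullPivLU; note that the original matrix is passed again.
  * \code
  * Eigen::MatrixXf A = Eigen::MatrixXf::Random(4, 3);
  * Eigen::FullPivLU<Eigen::MatrixXf> lu(A);
  * Eigen::MatrixXf img = lu.image(A); // its columns span the column space of A
  * \endcode
  */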
} // end namespace Eigen
#endif // EIGEN_MISC_IMAGE_H
| 2,913 | 34.108434 | 107 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/misc/Kernel.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_MISC_KERNEL_H
#define EIGEN_MISC_KERNEL_H
namespace Eigen {
namespace internal {
/** \class kernel_retval_base
*
*/
template<typename DecompositionType>
struct traits<kernel_retval_base<DecompositionType> >
{
typedef typename DecompositionType::MatrixType MatrixType;
typedef Matrix<
typename MatrixType::Scalar,
MatrixType::ColsAtCompileTime, // the number of rows in the "kernel matrix"
// is the number of cols of the original matrix
// so that the product "matrix * kernel = zero" makes sense
Dynamic, // we don't know at compile-time the dimension of the kernel
MatrixType::Options,
MatrixType::MaxColsAtCompileTime, // see explanation for 2nd template parameter
MatrixType::MaxColsAtCompileTime // the kernel is a subspace of the domain space,
// whose dimension is the number of columns of the original matrix
> ReturnType;
};
template<typename _DecompositionType> struct kernel_retval_base
: public ReturnByValue<kernel_retval_base<_DecompositionType> >
{
typedef _DecompositionType DecompositionType;
typedef ReturnByValue<kernel_retval_base> Base;
explicit kernel_retval_base(const DecompositionType& dec)
: m_dec(dec),
m_rank(dec.rank()),
m_cols(m_rank==dec.cols() ? 1 : dec.cols() - m_rank)
{}
inline Index rows() const { return m_dec.cols(); }
inline Index cols() const { return m_cols; }
inline Index rank() const { return m_rank; }
inline const DecompositionType& dec() const { return m_dec; }
template<typename Dest> inline void evalTo(Dest& dst) const
{
static_cast<const kernel_retval<DecompositionType>*>(this)->evalTo(dst);
}
protected:
const DecompositionType& m_dec;
Index m_rank, m_cols;
};
} // end namespace internal
#define EIGEN_MAKE_KERNEL_HELPERS(DecompositionType) \
typedef typename DecompositionType::MatrixType MatrixType; \
typedef typename MatrixType::Scalar Scalar; \
typedef typename MatrixType::RealScalar RealScalar; \
typedef Eigen::internal::kernel_retval_base<DecompositionType> Base; \
using Base::dec; \
using Base::rank; \
using Base::rows; \
using Base::cols; \
kernel_retval(const DecompositionType& dec) : Base(dec) {}
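/** Usage sketch (illustrative): kernel_retval_base backs the kernel() method of
  * decompositions such as FullPivLU.
  * \code
  * Eigen::MatrixXf A = Eigen::MatrixXf::Random(3, 5);
  * Eigen::FullPivLU<Eigen::MatrixXf> lu(A);
  * Eigen::MatrixXf ker = lu.kernel(); // A * ker is numerically zero
  * \endcode
  */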
} // end namespace Eigen
#endif // EIGEN_MISC_KERNEL_H
| 2,742 | 33.2875 | 103 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/misc/RealSvd2x2.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2010 Benoit Jacob <[email protected]>
// Copyright (C) 2013-2016 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_REALSVD2X2_H
#define EIGEN_REALSVD2X2_H
namespace Eigen {
namespace internal {
template<typename MatrixType, typename RealScalar, typename Index>
void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
JacobiRotation<RealScalar> *j_left,
JacobiRotation<RealScalar> *j_right)
{
using std::sqrt;
using std::abs;
Matrix<RealScalar,2,2> m;
m << numext::real(matrix.coeff(p,p)), numext::real(matrix.coeff(p,q)),
numext::real(matrix.coeff(q,p)), numext::real(matrix.coeff(q,q));
JacobiRotation<RealScalar> rot1;
RealScalar t = m.coeff(0,0) + m.coeff(1,1);
RealScalar d = m.coeff(1,0) - m.coeff(0,1);
if(abs(d) < (std::numeric_limits<RealScalar>::min)())
{
rot1.s() = RealScalar(0);
rot1.c() = RealScalar(1);
}
else
{
// If d!=0, then t/d cannot overflow because the magnitude of the
// entries forming d are not too small compared to the ones forming t.
RealScalar u = t / d;
RealScalar tmp = sqrt(RealScalar(1) + numext::abs2(u));
rot1.s() = RealScalar(1) / tmp;
rot1.c() = u / tmp;
}
m.applyOnTheLeft(0,1,rot1);
j_right->makeJacobi(m,0,1);
*j_left = rot1 * j_right->transpose();
}
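/** Usage sketch (illustrative; this mirrors how JacobiSVD is assumed to use this
  * helper): the two returned rotations diagonalize the selected 2x2 real block.
  * \code
  * Eigen::Matrix2f m;
  * m << 3.f, 1.f,
  *      2.f, 4.f;
  * Eigen::JacobiRotation<float> j_left, j_right;
  * Eigen::internal::real_2x2_jacobi_svd(m, 0, 1, &j_left, &j_right);
  * m.applyOnTheLeft(0, 1, j_left);
  * m.applyOnTheRight(0, 1, j_right); // m is now (approximately) diagonal
  * \endcode
  */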
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_REALSVD2X2_H
| 1,748 | 30.232143 | 74 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/Eigen/src/plugins/MatrixCwiseUnaryOps.h
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <[email protected]>
// Copyright (C) 2006-2008 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
// This file is included into the body of the base classes supporting matrix specific coefficient-wise functions.
// This include MatrixBase and SparseMatrixBase.
typedef CwiseUnaryOp<internal::scalar_abs_op<Scalar>, const Derived> CwiseAbsReturnType;
typedef CwiseUnaryOp<internal::scalar_abs2_op<Scalar>, const Derived> CwiseAbs2ReturnType;
typedef CwiseUnaryOp<internal::scalar_sqrt_op<Scalar>, const Derived> CwiseSqrtReturnType;
typedef CwiseUnaryOp<internal::scalar_sign_op<Scalar>, const Derived> CwiseSignReturnType;
typedef CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const Derived> CwiseInverseReturnType;
/// \returns an expression of the coefficient-wise absolute value of \c *this
///
/// Example: \include MatrixBase_cwiseAbs.cpp
/// Output: \verbinclude MatrixBase_cwiseAbs.out
///
EIGEN_DOC_UNARY_ADDONS(cwiseAbs,absolute value)
///
/// \sa cwiseAbs2()
///
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const CwiseAbsReturnType
cwiseAbs() const { return CwiseAbsReturnType(derived()); }
/// \returns an expression of the coefficient-wise squared absolute value of \c *this
///
/// Example: \include MatrixBase_cwiseAbs2.cpp
/// Output: \verbinclude MatrixBase_cwiseAbs2.out
///
EIGEN_DOC_UNARY_ADDONS(cwiseAbs2,squared absolute value)
///
/// \sa cwiseAbs()
///
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const CwiseAbs2ReturnType
cwiseAbs2() const { return CwiseAbs2ReturnType(derived()); }
/// \returns an expression of the coefficient-wise square root of *this.
///
/// Example: \include MatrixBase_cwiseSqrt.cpp
/// Output: \verbinclude MatrixBase_cwiseSqrt.out
///
EIGEN_DOC_UNARY_ADDONS(cwiseSqrt,square-root)
///
/// \sa cwisePow(), cwiseSquare()
///
EIGEN_DEVICE_FUNC
inline const CwiseSqrtReturnType
cwiseSqrt() const { return CwiseSqrtReturnType(derived()); }
/// \returns an expression of the coefficient-wise signum of *this.
///
/// Example: \include MatrixBase_cwiseSign.cpp
/// Output: \verbinclude MatrixBase_cwiseSign.out
///
EIGEN_DOC_UNARY_ADDONS(cwiseSign,sign function)
///
EIGEN_DEVICE_FUNC
inline const CwiseSignReturnType
cwiseSign() const { return CwiseSignReturnType(derived()); }
/// \returns an expression of the coefficient-wise inverse of *this.
///
/// Example: \include MatrixBase_cwiseInverse.cpp
/// Output: \verbinclude MatrixBase_cwiseInverse.out
///
EIGEN_DOC_UNARY_ADDONS(cwiseInverse,inverse)
///
/// \sa cwiseProduct()
///
EIGEN_DEVICE_FUNC
inline const CwiseInverseReturnType
cwiseInverse() const { return CwiseInverseReturnType(derived()); }
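/// Usage sketch (illustrative): these members return lazy expressions, so they can
/// be chained and combined without creating temporaries until assignment.
/// \code
/// Eigen::MatrixXd M = Eigen::MatrixXd::Random(3, 3);
/// Eigen::MatrixXd A = M.cwiseAbs().cwiseSqrt();         // sqrt(|m_ij|)
/// Eigen::MatrixXd B = M.cwiseAbs2() + M.cwiseInverse(); // m_ij^2 + 1/m_ij
/// \endcode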
| 2,937 | 33.162791 | 113 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/cloud/cloud.h
|
//
//
// 0==========================0
// | Local feature test |
// 0==========================0
//
// version 1.0 :
// >
//
//---------------------------------------------------
//
// Cloud header
//
//----------------------------------------------------
//
// Hugues THOMAS - 10/02/2017
//
# pragma once
#include <Eigen/Eigenvalues>
#include "points.h"
#include <chrono>
#include <random>
//------------------------------------------------------------------------------------------------------------
// PointCloud class
// ****************
//
//------------------------------------------------------------------------------------------------------------
struct PointCloud
{
std::vector<PointXYZ> pts;
// Must return the number of data points
inline size_t kdtree_get_point_count() const { return pts.size(); }
// Returns the dim'th component of the idx'th point in the class:
// Since this is inlined and the "dim" argument is typically an immediate value, the
// "if/else's" are actually solved at compile time.
inline float kdtree_get_pt(const size_t idx, const size_t dim) const
{
if (dim == 0) return pts[idx].x;
else if (dim == 1) return pts[idx].y;
else return pts[idx].z;
}
// Optional bounding-box computation: return false to default to a standard bbox computation loop.
	// Return true if the BBOX was already computed by the class and returned in "bb" so it does not need to be recomputed here.
// Look at bb.size() to find out the expected dimensionality (e.g. 2 or 3 for point clouds)
template <class BBOX>
bool kdtree_get_bbox(BBOX& /* bb */) const { return false; }
};
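// Usage sketch (illustrative; assumes the bundled nanoflann API): build a 3D KD-tree
// over a PointCloud and query the nearest neighbor of a point.
//
// typedef nanoflann::KDTreeSingleIndexAdaptor<
//     nanoflann::L2_Simple_Adaptor<float, PointCloud>, PointCloud, 3> PointXYZ_KDTree;
//
// PointCloud cloud;
// (fill cloud.pts with PointXYZ elements here)
// PointXYZ_KDTree index(3, cloud, nanoflann::KDTreeSingleIndexAdaptorParams(10));
// index.buildIndex();
//
// float query_pt[3] = {0.f, 0.f, 0.f};
// size_t ret_index = 0;
// float out_dist_sqr = 0.f;
// nanoflann::KNNResultSet<float> resultSet(1);
// resultSet.init(&ret_index, &out_dist_sqr);
// index.findNeighbors(resultSet, query_pt, nanoflann::SearchParams(10));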
// Utility function for pointclouds
void filter_pointcloud(std::vector<PointXYZ>& pts, std::vector<float>& scores, float filter_value);
void filter_floatvector(std::vector<float>& vec, float filter_value);
template <class T>
void filter_anyvector(std::vector<T>& vec, std::vector<float>& scores, float filter_value)
{
// Remove every element whose score is < filter_value
auto vec_address = vec.data();
vec.erase(std::remove_if(vec.begin(), vec.end(),
[&scores, vec_address, filter_value](const T& f) { return scores[(size_t)(&f - vec_address)] < filter_value; }),
vec.end());
}
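// Usage sketch (illustrative): keep only the elements whose score passes the threshold.
// std::vector<int> labels = {1, 2, 3};
// std::vector<float> scores = {0.9f, 0.1f, 0.7f};
// filter_anyvector(labels, scores, 0.5f); // labels becomes {1, 3}; scores is left unchanged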
// PLY reading/saving functions
void save_cloud(std::string dataPath, std::vector<PointXYZ>& points, std::vector<PointXYZ>& normals, std::vector<float>& features);
void save_cloud(std::string dataPath, std::vector<PointXYZ>& points, std::vector<float>& features);
void save_cloud(std::string dataPath, std::vector<PointXYZ>& points, std::vector<PointXYZ>& normals);
void save_cloud(std::string dataPath, std::vector<PointXYZ>& points);
void load_cloud(std::string& dataPath,
std::vector<PointXYZ>& points);
void load_cloud(std::string& dataPath,
std::vector<PointXYZ>& points,
std::vector<float>& float_scalar,
std::string& float_scalar_name,
std::vector<int>& int_scalar,
std::string& int_scalar_name);
void load_annot(std::string &dataPath,
std::vector<int> &int_scalar,
std::string &int_scalar_name);
void load_frame(std::string &dataPath,
std::vector<PointXYZ> &f_pts,
std::vector<float> ×tamps,
std::vector<int> &rings,
std::vector<int> &loc_labels,
std::string &save_path,
std::string &time_name,
std::string &ring_name);
void random_3_pick(int &A_i, int &B_i, int &C_i,
std::uniform_int_distribution<int> &distribution,
std::default_random_engine &generator);
bool is_triplet_bad(PointXYZ &A, PointXYZ &B, PointXYZ &C, PointXYZ &u);
Plane3D plane_ransac(std::vector<PointXYZ> &points,
float max_dist = 0.1,
int max_steps = 100);
Plane3D frame_ground_ransac(std::vector<PointXYZ> &points,
std::vector<PointXYZ> &normals,
float vertical_thresh_deg = 10.0,
float max_dist = 0.1,
float ground_z = 0.0);
bool rot_u_to_v(PointXYZ u, PointXYZ v, Eigen::Matrix3d &R);
// float tukey(float x);
| 3,933 | 30.725806 | 131 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/neighbors/neighbors.h
|
#pragma once
#include "../cloud/cloud.h"
#include "../nanoflann/nanoflann.hpp"
#include <set>
#include <cstdint>
using namespace std;
void ordered_neighbors(vector<PointXYZ>& queries,
vector<PointXYZ>& supports,
vector<int>& neighbors_indices,
float radius);
void batch_ordered_neighbors(vector<PointXYZ>& queries,
vector<PointXYZ>& supports,
vector<int>& q_batches,
vector<int>& s_batches,
vector<int>& neighbors_indices,
float radius);
void batch_nanoflann_neighbors(vector<PointXYZ>& queries,
vector<PointXYZ>& supports,
vector<int>& q_batches,
vector<int>& s_batches,
vector<int>& neighbors_indices,
float radius);
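// Usage sketch (illustrative; the batch vectors are assumed to hold the number of
// points in each batch, here a single batch):
// vector<PointXYZ> queries, supports; // filled beforehand
// vector<int> q_batches(1, (int)queries.size());
// vector<int> s_batches(1, (int)supports.size());
// vector<int> neighbors_indices;
// batch_nanoflann_neighbors(queries, supports, q_batches, s_batches, neighbors_indices, 0.5f);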
| 1,018 | 32.966667 | 63 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/npm_ply/ply_file_in.h
|
/*----------------------------------------------------------------------------*\
NPM Toolbox: Various functions and classes for point cloud processing
--------------------------------------------------------------------------------
Author(s): Hassan Bouchiba
Creation: 04 Nov 2015
\*----------------------------------------------------------------------------*/
#ifndef NPM_TOOLBOX_IO_PLY_FILE_IN_H_
#define NPM_TOOLBOX_IO_PLY_FILE_IN_H_
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>
#include "ply_types.h"
namespace npm {
/*----------------------------------------------------------------------------*\
class PLYFileIn declaration
\*----------------------------------------------------------------------------*/
// Class to handle point cloud PLY file input
// Warning: This class does not handle other information that may be present
// in the PLY file (e.g. triangles).
class PLYFileIn {
public:
static uint64_t kReadBufferSize;
// Constructor.
PLYFileIn(std::string file_path);
// Destructor.
~PLYFileIn();
  // Read point cloud data from the PLY file. At this point the data is only
  // stored internally in this class. It must then be retrieved with the
  // getField() function.
bool read(uint64_t* num_pt=nullptr, std::vector<PLYType>* types=nullptr,
std::vector<std::string>* properties=nullptr);
  // Retrieve a dim-dimensional field. A field is composed of multiple scalar fields
  // of the same type.
template<typename T>
bool getField(int field_index, int dim, std::vector<T>& data);
// Returns file header.
std::string header() const;
// Returns internal format.
PLYFormat internal_format() const;
// Modify binary read buffer size in bytes.
static void set_read_buffer_size(uint64_t read_buffer_size);
private:
// Auxiliary function to parse the PLY header header_.
bool parseHeader();
private:
std::string file_path_;
PLYFormat internal_format_;
uint64_t num_points_;
std::string header_;
std::vector<std::string> properties_;
std::vector<PLYType> types_;
std::vector<char*> fields_;
std::vector<size_t> fields_sizes_;
};
} // namespace npm
/*----------------------------------------------------------------------------*\
class PLYFileIn definition (inline functions)
\*----------------------------------------------------------------------------*/
inline
npm::PLYFileIn::PLYFileIn(std::string file_path)
: file_path_(file_path),
internal_format_(PLY_BINARY_LITTLE_ENDIAN),
num_points_(0),
header_("") {}
inline
npm::PLYFileIn::~PLYFileIn() {
for (auto& f: fields_) {delete[] f;}
}
inline
std::string npm::PLYFileIn::header() const {
return header_;
}
inline
npm::PLYFormat npm::PLYFileIn::internal_format() const {
return internal_format_;
}
inline
void npm::PLYFileIn::set_read_buffer_size(uint64_t read_buffer_size) {
kReadBufferSize = read_buffer_size;
}
template<typename T>
inline
bool npm::PLYFileIn::getField(int field_index, int dim, std::vector<T>& data) {
using std::cout; using std::endl;
// Perform checks on input data
// ------------------------------------------------------------------------ //
if ((field_index + dim) > (int)properties_.size()) {
cout
<< "[PLYFileIn] error: mismatch between requested field dimension ("
<< dim << ") and remaining fields to read in PLY file ("
<< properties_.size()-field_index << "/" << properties_.size()
<< ")" << endl;
return false;
}
size_t input_vec_size(sizeof(T));
size_t expected_vec_size(0);
for (int i(0); i<dim; ++i) {
expected_vec_size += fields_sizes_[field_index+i];
}
if (input_vec_size != expected_vec_size) {
cout
<< "[PLYFileIn] error: mismatch between input field size ("
<< input_vec_size << ") (sizeof (T) for std::vector<T>) and expected "
<< "size (" << expected_vec_size << ")" << endl;
return false;
}
// Fill input vector
// ------------------------------------------------------------------------ //
data.resize(num_points_);
char* in_data_tab = reinterpret_cast<char*>(data.data());
uint64_t in_data_offset(0);
uint64_t field_offset(0);
for (uint64_t i(0); i<num_points_; ++i) {
in_data_offset = i * input_vec_size;
for (uint64_t j(0); (int)j<dim; ++j) {
size_t field_ind = field_index + j;
size_t field_size = fields_sizes_[field_ind];
field_offset = i * field_size;
for (uint64_t k(0); k<field_size; ++k) {
in_data_tab[in_data_offset] = fields_[field_ind][field_offset];
++in_data_offset;
++field_offset;
}
}
}
// delete used scalar fields.
for (uint64_t j(0); (int)j<dim; ++j) {
size_t field_ind = field_index + j;
delete[] fields_[field_ind];
fields_[field_ind] = nullptr;
}
return true;
}
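// Usage sketch (illustrative; the field layout of the file is an assumption): read a
// PLY file whose first three properties are float x, y, z.
//
// struct XYZ { float x, y, z; };
// npm::PLYFileIn in("cloud.ply");
// uint64_t n = 0;
// std::vector<npm::PLYType> types;
// std::vector<std::string> props;
// if (in.read(&n, &types, &props)) {
//   std::vector<XYZ> pts;
//   in.getField(0, 3, pts); // sizeof(XYZ) must equal the sum of the three field sizes
// }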
#ifdef NPM_UNCOMMENT
// PLYFileIn class definition
template<typename... Targs>
bool npm::PLYFileIn::Read(Targs&... vectors) {
// ----- Check file stream state ------------------------------------------ //
switch (state_) {
case State::kError: {
return false;
}
case State::kOpen: {
if (!ReadHeader())
return false;
break;
}
case State::kHeaderRead: {
// if ReadHeader() function have been already called
break;
}
case State::kFileRead: {
std::cout << "Error: file already read" << std::endl;
return false;
}
}
// ----- Retrieve vectors meta-information -------------------------------- //
std::vector<size_t> vector_sizes; // sizeof(T) for each vector
std::vector<char*> vector_data; // data() of each vector
size_t num_vectors = sizeof...(vectors);
AllocateMemoryAndGatherData(num_points_, vector_sizes,
vector_data, vectors...);
// Check if input vectors match file data
size_t vec_point_size_in_bytes = 0;
for (auto& s:vector_sizes) {
vec_point_size_in_bytes += s;
}
size_t file_point_size_in_bytes = 0;
for (auto& t:types_) {
file_point_size_in_bytes += TypeSize(t);
}
size_t file_data_size_in_bytes = file_point_size_in_bytes * num_points_;
if (vec_point_size_in_bytes > file_point_size_in_bytes) {
std::cout << "Error: provided vectors do not match the input file data "
<< "(file point size "
<< file_point_size_in_bytes
<< " < vector point size "
<< vec_point_size_in_bytes << ")" << std::endl;
state_ = State::kError;
return false;
}
// ----- Read data from file ---------------------------------------------- //
char* data_buffer = new char[file_data_size_in_bytes];
// Copy data in data_buffer
switch (format_) {
// ---------------------------------------------------------------------- //
case PLYFormat::kBinaryLittleEendian: {
uint64_t n = file_data_size_in_bytes / kReadBufferSize;
uint64_t r = file_data_size_in_bytes % kReadBufferSize;
for (uint64_t i(0); i < n; ++i) {
file_in_.read(data_buffer + i*kReadBufferSize, kReadBufferSize);
}
file_in_.read(data_buffer + n*kReadBufferSize, r);
break;
}
// ---------------------------------------------------------------------- //
case PLYFormat::kBinaryBigEendian: {
std::cout << "Error: function not implemented yet for BBE format"
<< std::endl;
state_ = State::kError;
return false;
}
// ---------------------------------------------------------------------- //
case PLYFormat::kASCII: {
uint64_t buffer_offset(0);
std::string line("");
for (uint64_t i(0); i<num_points_; ++i) {
std::getline(file_in_, line);
std::stringstream line_stream(line);
buffer_offset = i * file_point_size_in_bytes;
for (auto& type:types_) {
// Remark:
// reading ASCII data directly with
// if(type==PLYType::kChar)
//   line_stream >> *reinterpret_cast<int8_t*>(data_buffer+buffer_offset);
// does not work, because line_stream would interpret it as "read the next
// ASCII character" and not "read the next 8-bit integer" as one might
// expect.
int integer_tmp(0);
if((type==PLYType::kChar) || (type==PLYType::kUChar) ||
(type==PLYType::kShort) || (type==PLYType::kUShort) ||
(type==PLYType::kInt) || (type==PLYType::kUInt)) {
line_stream >> integer_tmp;
}
if(type==PLYType::kChar)
*reinterpret_cast<int8_t*>(data_buffer+buffer_offset) = static_cast<int8_t>(integer_tmp);
if(type==PLYType::kUChar)
*reinterpret_cast<uint8_t*>(data_buffer+buffer_offset) = static_cast<uint8_t>(integer_tmp);
if(type==PLYType::kShort)
*reinterpret_cast<int16_t*>(data_buffer+buffer_offset) = static_cast<int16_t>(integer_tmp);
if(type==PLYType::kUShort)
*reinterpret_cast<uint16_t*>(data_buffer+buffer_offset) = static_cast<uint16_t>(integer_tmp);
if(type==PLYType::kInt)
*reinterpret_cast<int32_t*>(data_buffer+buffer_offset) = static_cast<int32_t>(integer_tmp);
if(type==PLYType::kUInt)
*reinterpret_cast<uint32_t*>(data_buffer+buffer_offset) = static_cast<uint32_t>(integer_tmp);
if(type==PLYType::kFloat)
line_stream >> *reinterpret_cast<float*>(data_buffer+buffer_offset);
if(type==PLYType::kDouble)
line_stream >> *reinterpret_cast<double*>(data_buffer+buffer_offset);
buffer_offset += TypeSize(type);
}
}
break;
}
// ---------------------------------------------------------------------- //
default: {
assert(false);
}
}
// Parse data into vectors
uint64_t buffer_offset(0);
uint64_t vector_offset(0);
for (uint64_t i(0); i<num_points_; ++i) {
buffer_offset = i * file_point_size_in_bytes;
for (size_t k(0); k<num_vectors; ++k) {
vector_offset = i * vector_sizes[k];
for (size_t j(0); j<vector_sizes[k]; ++j) {
vector_data[k][vector_offset] = data_buffer[buffer_offset];
++buffer_offset;
++vector_offset;
}
}
}
delete[] data_buffer;
state_ = State::kFileRead;
return true;
}
template<typename T>
inline
void npm::PLYFileIn::AllocateMemoryAndGatherData(
uint64_t num_points,
std::vector<size_t>& vec_sizes,
std::vector<char*>& vec_data,
std::vector<T>& first_vec) {
first_vec.resize(num_points);
vec_sizes.push_back(sizeof(T));
vec_data.push_back(reinterpret_cast<char*>(first_vec.data()));
}
template<typename T, typename... Targs>
inline
void npm::PLYFileIn::AllocateMemoryAndGatherData(
uint64_t num_points,
std::vector<size_t>& vec_sizes,
std::vector<char*>& vec_data,
std::vector<T>& first_vec,
Targs&... other_vec) {
first_vec.resize(num_points);
vec_sizes.push_back(sizeof(T));
vec_data.push_back(reinterpret_cast<char*>(first_vec.data()));
AllocateMemoryAndGatherData(num_points, vec_sizes, vec_data, other_vec...);
}
#endif // NPM_UNCOMMENT
#endif // NPM_TOOLBOX_IO_PLY_FILE_IN_H_
| 11,137 | 30.552408 | 106 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/npm_ply/ply_file_out.h
|
/*----------------------------------------------------------------------------*\
NPM Toolbox: Various functions and classes for point cloud processing
--------------------------------------------------------------------------------
Author(s): Hassan Bouchiba
Creation: 04 Nov 2015
\*----------------------------------------------------------------------------*/
#ifndef NPM_TOOLBOX_IO_PLY_FILE_OUT_H_
#define NPM_TOOLBOX_IO_PLY_FILE_OUT_H_
#include <cstdint>
#include <initializer_list>
#include <string>
#include <vector>
#include "ply_types.h"
namespace npm {
/*----------------------------------------------------------------------------*\
class PLYFileOut declaration
\*----------------------------------------------------------------------------*/
// Class to handle point cloud PLY file output
// Warning: this class does not handle other information that may be present
// in the PLY file (e.g. triangles)
// Usage:
// std::vector<Eigen::Vector3f> points;
// std::vector<double> ts;
//
// /*code here to fill the two vectors*/
//
// uint64_t n = points.size();
// PLYFileOut out(path);
// out.pushField(n, 3, PLY_FLOAT, {"x", "y", "z"}, points);
// out.pushField(n, 1, PLY_DOUBLE, {"ts"}, ts);
// out.write();
class PLYFileOut {
public:
static uint64_t kASCIIWritePrecision;
static uint64_t kWriteBufferSize;
// Constructor
PLYFileOut(std::string file_path, PLYFormat format=PLY_BINARY_LITTLE_ENDIAN);
// Push data field by field. A field is composed of multiple scalar fields
// of the same type.
template<typename T>
bool pushField(uint64_t num_pt, int dim, PLYType type,
std::initializer_list<std::string> properties, std::vector<T>& data);
// Writes data to external PLY file.
bool write();
// Returns file header.
std::string header() const;
// Modify the internal format.
void set_internal_format(PLYFormat new_format);
// Modify the number of digits written for the PLY_ASCII internal format.
static void set_ascii_write_precision(uint64_t ascii_write_prec);
// Modify binary write buffer size in bytes.
static void set_write_buffer_size(uint64_t write_buffer_size);
private:
// Auxiliary function to generate the PLY file header.
void genHeader();
private:
std::string file_path_;
PLYFormat internal_format_;
//bool open_;
uint64_t num_points_;
std::string header_;
std::vector<std::string> properties_;
std::vector<PLYType> types_;
// TODO(hassan): change the name data_ into field_ (and field_sizes_).
std::vector<char*> data_;
std::vector<size_t> data_sizes_;
};
} // namespace npm
/*----------------------------------------------------------------------------*\
class PLYFileOut definition (inline functions)
\*----------------------------------------------------------------------------*/
#include <cassert>
#include <iostream>
template<typename T>
inline
bool npm::PLYFileOut::pushField(uint64_t num_pt, int dim, PLYType type,
std::initializer_list<std::string> properties, std::vector<T>& data) {
using std::cout; using std::endl;
// Perform checks on input data sizes
// ------------------------------------------------------------------------ //
if (num_points_ == 0) {num_points_ = num_pt;}
size_t in_num_props = properties.size();
size_t in_expected_data_size = dim * typeSize(type) * num_pt;
size_t in_data_size = sizeof(T) * data.size();
if (num_pt == 0) {
cout
<< "[PLYFileOut] error: field {";
for(auto& p: properties) {cout << p << " ";}
cout << "}" << endl;
cout
<< "Null number of points" << endl;
return false;
}
if (num_pt != num_points_) {
cout
<< "[PLYFileOut] error: field {";
for(auto& p: properties) {cout << p << " ";}
cout << "}" << endl;
cout
<< "Mismatch between previously specified number of "
<< "points (" << num_points_ << ") and requested number of points for "
<< "this field (" << num_pt << ")" << endl;
return false;
}
if ((size_t)dim != in_num_props) {
cout
<< "[PLYFileOut] error: field {";
for(auto& p: properties) {cout << p << " ";}
cout << "}" << endl;
cout
<< "Mismatch between specified dimension (" << dim
<< ") and number of input properties of this field ("
<< properties.size() << ")" << endl;
return false;
}
if (in_expected_data_size != in_data_size) {
cout
<< "[PLYFileOut] error: field {";
for(auto& p: properties) {cout << p << " ";}
cout << "}" << endl;
cout
<< "Mismatch between expected data size ("
<< in_expected_data_size << ") and provided data size for this "
<< "field (" << in_data_size << ")" << endl;
return false;
}
// Save data internally
// ------------------------------------------------------------------------ //
for (auto& p: properties) {properties_.push_back(p);}
for (int k(0); k<dim; ++k) {types_.push_back(type);}
data_.push_back(reinterpret_cast<char*>(data.data()));
data_sizes_.push_back(sizeof(T));
return true;
}
inline
std::string npm::PLYFileOut::header() const {
return header_;
}
inline
void npm::PLYFileOut::set_internal_format(PLYFormat new_format) {
internal_format_ = new_format;
}
inline
void npm::PLYFileOut::set_ascii_write_precision(uint64_t ascii_write_prec) {
kASCIIWritePrecision = ascii_write_prec;
}
inline
void npm::PLYFileOut::set_write_buffer_size(uint64_t write_buffer_size) {
kWriteBufferSize = write_buffer_size;
}
#endif // NPM_TOOLBOX_IO_PLY_FILE_OUT_H_
| 5,498 | 28.564516 | 80 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/npm_ply/ply_types.h
|
/*----------------------------------------------------------------------------*\
NPM Toolbox: Various functions and classes for point cloud processing
--------------------------------------------------------------------------------
Author(s): Hassan Bouchiba
Creation: 09 Nov 2015
Description: This file contains auxiliary enums and functions for PLY I/O.
\*----------------------------------------------------------------------------*/
#ifndef NPM_TOOLBOX_IO_PLY_TYPES_H_
#define NPM_TOOLBOX_IO_PLY_TYPES_H_
#include <string>
namespace npm {
/*----------------------------------------------------------------------------*\
enums definition
\*----------------------------------------------------------------------------*/
// PLY internal data format
enum PLYFormat {
PLY_BINARY_LITTLE_ENDIAN,
PLY_BINARY_BIG_ENDIAN,
PLY_ASCII
};
// Basic type of a PLY property
enum PLYType {
PLY_CHAR,
PLY_UCHAR,
PLY_SHORT,
PLY_USHORT,
PLY_INT,
PLY_UINT,
PLY_FLOAT,
PLY_DOUBLE
};
/*----------------------------------------------------------------------------*\
static functions declaration
\*----------------------------------------------------------------------------*/
// Return the string of each type handled by PLY format
static std::string typeStr(PLYType type);
// Return the PLY type associated with the input string
static PLYType strType(std::string str);
// Return the size of each type handled by PLY format
static size_t typeSize(PLYType type);
} // namespace npm
/*----------------------------------------------------------------------------*\
static functions definition
\*----------------------------------------------------------------------------*/
#include <cassert>
inline
std::string npm::typeStr(PLYType type) {
switch (type) {
case PLY_CHAR: {return "char";}
case PLY_UCHAR: {return "uchar";}
case PLY_SHORT: {return "short";}
case PLY_USHORT: {return "ushort";}
case PLY_INT: {return "int";}
case PLY_UINT: {return "uint";}
case PLY_FLOAT: {return "float";}
case PLY_DOUBLE: {return "double";}
default: {assert(false); return "unknown";}
}
}
inline
npm::PLYType npm::strType(std::string str) {
if (str.compare("char") == 0) {return PLY_CHAR;}
else if (str.compare("uchar") == 0) {return PLY_UCHAR;}
else if (str.compare("short") == 0) {return PLY_SHORT;}
else if (str.compare("ushort") == 0) {return PLY_USHORT;}
else if (str.compare("int") == 0) {return PLY_INT;}
else if (str.compare("uint") == 0) {return PLY_UINT;}
else if ((str.compare("float32") == 0) |
(str.compare("float") == 0)) {return PLY_FLOAT;}
else if ((str.compare("float64") == 0) |
(str.compare("double") == 0)) {return PLY_DOUBLE;}
else {assert(false); return PLY_INT;}
}
inline
size_t npm::typeSize(PLYType type) {
switch (type) {
case PLY_CHAR: {return 1;}
case PLY_UCHAR: {return 1;}
case PLY_SHORT: {return 2;}
case PLY_USHORT: {return 2;}
case PLY_INT: {return 4;}
case PLY_UINT: {return 4;}
case PLY_FLOAT: {return 4;}
case PLY_DOUBLE: {return 8;}
default: {assert(false); return 4;}
}
}
#endif // NPM_TOOLBOX_IO_PLY_TYPES_H_
| 3,200 | 28.366972 | 80 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/pointmap_slam/pointmap_slam.h
|
#pragma once
#include <cstdint>
#include <cstdio>
#include <ctime>
#include <random>
#include <unordered_set>
#include <numeric>
#include <fstream>
#include <filesystem>
#define _USE_MATH_DEFINES
#include <math.h>
#include "ceres/ceres.h"
#include "glog/logging.h"
#include <Eigen/Eigenvalues>
#include "../cloud/cloud.h"
#include "../nanoflann/nanoflann.hpp"
#include "../grid_subsampling/grid_subsampling.h"
#include "../polar_processing/polar_processing.h"
#include "../pointmap/pointmap.h"
#include "../icp/icp.h"
using namespace std;
// KDTree type definition
typedef nanoflann::KDTreeSingleIndexAdaptor<nanoflann::L2_Simple_Adaptor<float, PointCloud>, PointCloud, 3> PointXYZ_KDTree;
typedef Eigen::Matrix<double, 6, 6> Matrix6d;
typedef Eigen::Matrix<double, 6, 1> Vector6d;
// ICP params and result classes
// *****************************
class SLAM_params
{
public:
// Elements
// ********
// Number of lines of scan in the lidar
int lidar_n_lines;
// Size of the map voxels
float map_voxel_size;
// Size of the voxels for frame subsampling
float frame_voxel_size;
// max distance travelled before frames are removed from local map
float local_map_dist;
// Account for motion distortion (false in the case of simulated data)
bool motion_distortion;
// Are we filtering frames
bool filtering;
// Should we save subsampled and aligned frames for loop closure
bool saving_for_loop_closure;
// Should we force flat ground
bool force_flat_ground;
// Are we using a barycenter pointmap (first-in pointmap otherwise)
bool barycenter_map;
// Are we updating the given initial map
bool update_init_map;
// Verbose option (time in seconds between verbose messages; negative for no verbose)
float verbose_time;
// Transformation matrix from velodyne frame to base frame
Eigen::Matrix4d H_velo_base;
// Params of ICP used in this SLAM
ICP_params icp_params;
// Params of frame normal computation
float h_scale;
float r_scale;
int outl_rjct_passes;
float outl_rjct_thresh;
vector<float> polar_r2s;
float min_theta_radius;
// Methods
// *******
// Constructor
SLAM_params()
{
lidar_n_lines = 32;
min_theta_radius = 0.025;
map_voxel_size = 0.08;
frame_voxel_size = 0.2;
local_map_dist = 10.0;
motion_distortion = false;
filtering = false;
saving_for_loop_closure = false;
force_flat_ground = false;
barycenter_map = false;
update_init_map = true;
verbose_time = -1;
H_velo_base = Eigen::Matrix4d::Identity(4, 4);
h_scale = 0.3;
r_scale = 10.0;
outl_rjct_passes = 2;
outl_rjct_thresh = 0.003;
}
};
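// Configuration sketch (illustrative only): the default constructor above fills
// every field, so a typical caller only overrides the few values that differ
// from the defaults. The numbers below are arbitrary examples.
//
//   SLAM_params params;
//   params.lidar_n_lines = 64;
//   params.map_voxel_size = 0.1f;
//   params.motion_distortion = true;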
class PointMapSLAM
{
public:
// Elements
// ********
// Parameters
SLAM_params params;
// Map used by the algorithm
PointMap map;
PointMap map0;
// Pose of the last mapped frame
Eigen::Matrix4d last_H;
// Current pose correction from odometry to map
Eigen::Matrix4d H_OdomToMap;
// Index of the current frame
int frame_i;
// Count errors to stop if this is not going well
int warning_count;
// Container for the motion corrected frame used to update the map
vector<PointXYZ> corrected_frame;
vector<double> corrected_scores;
float t_min, t_max;
// Methods
// *******
// Constructor
PointMapSLAM(SLAM_params slam_params0, vector<PointXYZ> &init_points, vector<PointXYZ> &init_normals, vector<float> &init_scores)
{
// Init parameters
params = slam_params0;
// Init map from previous session
map.dl = params.map_voxel_size;
map0.dl = params.map_voxel_size;
if (init_points.size() > 0)
{
map0.update_idx = -1;
map0.update(init_points, init_normals, init_scores, -1);
}
if (params.barycenter_map)
{
// We do not initialize the map to get a fresh new one
map.set_barycenter();
}
else
{
// Init map from previous session
if (init_points.size() > 0)
{
map.update_idx = -1;
map.update(init_points, init_normals, init_scores, -1);
}
}
// Dummy first last_H
last_H = Eigen::Matrix4d::Identity(4, 4);
H_OdomToMap = Eigen::Matrix4d::Identity(4, 4);
frame_i = 0;
warning_count = 0;
}
// Mapping functions
void init_map() { return; }
void add_new_frame(vector<PointXYZ> &f_pts,
vector<float> &f_ts,
vector<int> &f_rings,
Eigen::Matrix4d &H_OdomToScanner,
string save_path,
int verbose = 0);
};
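// Usage sketch (illustrative only): start a session with an empty initial map
// and feed frames as they arrive. The frame buffers and the odometry pose are
// assumed to be filled elsewhere.
//
//   vector<PointXYZ> init_pts, init_normals;
//   vector<float> init_scores;
//   PointMapSLAM slam(SLAM_params(), init_pts, init_normals, init_scores);
//   vector<PointXYZ> f_pts;   // raw frame points
//   vector<float> f_ts;       // per-point timestamps
//   vector<int> f_rings;      // per-point ring indices
//   Eigen::Matrix4d H_odom = Eigen::Matrix4d::Identity();
//   slam.add_new_frame(f_pts, f_ts, f_rings, H_odom, "");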
// Function declaration
// ********************
void complete_map(string &frame_names,
vector<double> &frame_times,
Eigen::MatrixXd &slam_H,
vector<float> &slam_times,
PointMap& map,
vector<int> &loc_labels,
std::string &save_path,
std::string &time_name,
std::string &ring_name,
size_t start_ind,
size_t last_ind,
SLAM_params ¶ms);
void preprocess_frame(vector<PointXYZ> &f_pts,
vector<float> &f_ts,
vector<int> &f_rings,
vector<PointXYZ> &sub_pts,
vector<PointXYZ> &normals,
vector<float> &norm_scores,
vector<double> &icp_scores,
vector<size_t> &sub_inds,
Plane3D &frame_ground,
vector<float> &heights,
SLAM_params ¶ms,
vector<clock_t> &t);
void ceres_hello();
Eigen::MatrixXd call_on_sim_sequence(string &frame_names,
vector<double> &frame_times,
Eigen::MatrixXd >_H,
vector<double> >_t,
Eigen::MatrixXd &odom_H,
vector<PointXYZ> &init_pts,
vector<PointXYZ> &init_normals,
vector<float> &init_scores,
SLAM_params &slam_params,
string save_path);
Eigen::MatrixXd call_on_real_sequence(string &frame_names,
vector<double> &frame_times,
Eigen::MatrixXd &odom_H,
vector<PointXYZ> &init_pts,
vector<PointXYZ> &init_normals,
vector<float> &init_scores,
SLAM_params &slam_params,
string save_path);
| 5,832 | 22.239044 | 130 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/polar_processing/polar_processing.h
|
#pragma once
#include <set>
#include <cstdint>
#include <cstdio>
#include <ctime>
#define _USE_MATH_DEFINES
#include <math.h>
#include <Eigen/Eigenvalues>
#include "../cloud/cloud.h"
#include "../nanoflann/nanoflann.hpp"
using namespace std;
// KDTree type definition
typedef nanoflann::KDTreeSingleIndexAdaptor<nanoflann::L2_Simple_Adaptor<float, PointCloud>, PointCloud, 3> PointXYZ_KDTree;
void cart2pol_(vector<PointXYZ> &xyz);
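// Converts a single point from Cartesian (x, y, z) to polar coordinates
// (radius, elevation angle, azimuth + pi/2).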
inline PointXYZ cart2pol(const PointXYZ p)
{
float tmp1 = p.x * p.x + p.y * p.y;
float tmp2 = tmp1 + p.z * p.z;
return PointXYZ(sqrt(tmp2), atan2(p.z, sqrt(tmp1)), atan2(p.y, p.x) + M_PI / 2);
}
void get_min_max_times(vector<float> &f_ts, float &t_min, float &t_max, float loop_ratio);
void pca_features(vector<PointXYZ> &points,
vector<float> &eigenvalues,
vector<PointXYZ> &eigenvectors);
void detect_outliers(vector<PointXYZ> &rtp,
vector<float> &scores,
int lidar_n_lines,
float lidar_angle_res,
float minTheta,
int n_pass,
float threshold);
float get_lidar_angle_res(vector<PointXYZ> &rtp, float &minTheta, float &maxTheta, int lidar_n_lines);
void get_lidar_angles(vector<PointXYZ> &rtp, vector<float> &ring_angles, int lidar_n_lines);
void lidar_log_radius(vector<PointXYZ> &rtp, float polar_r, float r_scale);
void lidar_horizontal_scale(vector<PointXYZ> &rtp, float h_scale);
void extract_features_multi_thread(vector<PointXYZ> &points,
vector<PointXYZ> &normals,
vector<float> &planarity,
vector<float> &linearity,
int lidar_n_lines,
float h_scale,
float r_scale,
int verbose);
void smart_normal_score(vector<PointXYZ> &points,
vector<PointXYZ> &polar_pts,
vector<PointXYZ> &normals,
vector<float> &scores);
void smart_icp_score(vector<PointXYZ> &polar_pts,
vector<PointXYZ> &normals,
vector<float> &heights,
vector<double> &scores);
// void compare_map_to_frame(vector<PointXYZ> &frame_points,
// vector<PointXYZ> &map_points,
// vector<PointXYZ> &map_normals,
// unordered_map<VoxKey, size_t> &map_samples,
// Eigen::Matrix3d R_d,
// Eigen::Vector3d T_d,
// float theta_dl,
// float phi_dl,
// float map_dl,
// vector<float> &movable_probs,
// vector<int> &movable_counts);
void extract_lidar_frame_normals(vector<PointXYZ> &points,
vector<PointXYZ> &polar_pts,
vector<PointXYZ> &queries,
vector<PointXYZ> &polar_queries,
vector<int> &polar_rings,
vector<PointXYZ> &normals,
vector<float> &norm_scores,
vector<float> &polar_r2s);
| 2,700 | 29.011111 | 124 |
h
|
Crystal_Ball_Nav
|
Crystal_Ball_Nav-master/SOGM-3D-2D-Net/cpp_wrappers/src/region_growing/region_growing.h
|
#pragma once
#include <set>
#include <cstdint>
#include <cstdio>
#include <ctime>
#include <queue>
#include <numeric>
#include <random>
#define _USE_MATH_DEFINES
#include <math.h>
#include <Eigen/Eigenvalues>
#include "../cloud/cloud.h"
#include "../polar_processing/polar_processing.h"
#include "../pointmap/pointmap.h"
using namespace std;
// ICP params and result classes
// *****************************
class Plane3D
{
public:
// Elements
// ********
// The plane is defined by the equation a*x + b*y + c*z = d. The values (a, b, c) are stored in a PointXYZ called u.
PointXYZ u;
float d;
// Methods
// *******
// Constructor
Plane3D() { u.x = 1; u.y = 0; u.z = 0; d = 0; }
Plane3D(const float a0, const float b0, const float c0, const float d0) { u.x = a0; u.y = b0; u.z = c0; d = d0; }
Plane3D(const PointXYZ P0, const PointXYZ N0)
{
// Init with point and normal
u = N0;
d = N0.dot(P0);
}
Plane3D(const PointXYZ A, const PointXYZ B, const PointXYZ C)
{
// Init with three points
u = (B - A).cross(C - A);
d = u.dot(A);
}
// Method getting distance to one point
float point_distance(const PointXYZ P)
{
return abs((u.dot(P) - d) / sqrt(u.sq_norm()));
}
// Method getting square distance to one point
float point_sq_dist(const PointXYZ P)
{
float tmp = u.dot(P) - d;
return tmp * tmp / u.sq_norm();
}
// Method getting distances to some points
void point_distances(vector<PointXYZ>& points, vector<float>& distances)
{
size_t i = 0;
float inv_norm_u = 1 / sqrt(u.sq_norm());
for (auto& p : points)
{
distances[i] = abs((u.dot(p) - d) * inv_norm_u);
i++;
}
}
// Method updating the plane by least square fitting
void update(vector<PointXYZ>& points)
{
// Least-square optimal plane:
// ***************************
// Instead of solving the least-squares problem directly (which has singularities), we use PCA.
//
// The best plane always passes through the points' centroid, and its normal is the
// eigenvector associated with the smallest eigenvalue.
// Safety check
if (points.size() < 4)
return;
// Compute PCA
PointXYZ mean = accumulate(points.begin(), points.end(), PointXYZ());
mean = mean * (1.0 / points.size());
// Create centralized data
vector<PointXYZ> points_c(points);
for (auto& p : points_c)
p -= mean;
// Create a N by 3 matrix containing the points (same data in memory)
Eigen::Map<Eigen::Matrix<float, 3, Eigen::Dynamic>> X_c((float*)points_c.data(), 3, points_c.size());
// Compute covariance matrix
Eigen::Matrix3f cov(X_c * X_c.transpose() / points.size());
// Compute pca
Eigen::SelfAdjointEigenSolver<Eigen::Matrix3f> es;
es.compute(cov);
// Convert back to std containers
vector<float> eigenvalues(es.eigenvalues().data(), es.eigenvalues().data() + es.eigenvalues().size());
vector<PointXYZ> eigenvectors((PointXYZ*)es.eigenvectors().data(), (PointXYZ*)es.eigenvectors().data() + es.eigenvectors().rows());
// Define plane with point and normal
u = eigenvectors[0];
d = u.dot(mean);
}
};
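// Usage sketch (illustrative only): build a plane from three points and query
// point-to-plane distances. The `cloud` vector is assumed to be filled elsewhere.
//
//   Plane3D ground(PointXYZ(0, 0, 0), PointXYZ(1, 0, 0), PointXYZ(0, 1, 0));
//   float d = ground.point_distance(PointXYZ(0.5, 0.5, 2.0));  // 2.0
//   vector<float> dists(cloud.size());
//   ground.point_distances(cloud, dists);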
class RG_params
{
public:
// Elements
// ********
// Threshold on normal variation (angle in radians)
float norm_thresh;
// Threshold on point to plane distance
float dist_thresh;
// Min number of points to keep a plane
int min_points;
// Maximum number of planes kept
int max_planes;
// Methods
// *******
// Constructor
RG_params()
{
norm_thresh = 0.1;
dist_thresh = 0.1;
min_points = 500;
max_planes = 50;
}
};
void get_lidar_image(vector<PointXYZ>& rtp,
vector<int>& image,
int lidar_n_lines,
float lidar_angle_res,
float minTheta);
void lidar_plane_growing(vector<PointXYZ>& points,
vector<PointXYZ>& normals,
vector<int>& plane_inds,
vector<Plane3D>& planes,
int lidar_n_lines,
RG_params params);
void pointmap_plane_growing(vector<PointXYZ>& points,
vector<PointXYZ>& normals,
vector<int>& plane_inds,
vector<Plane3D>& planes,
float dl,
RG_params params);
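// Usage sketch (illustrative only): extract planes from an already-built point
// map. The point/normal buffers and the cell size dl are assumed to come from
// the map construction step.
//
//   RG_params rg_params;
//   rg_params.min_points = 200;
//   vector<int> plane_inds;
//   vector<Plane3D> planes;
//   pointmap_plane_growing(map_points, map_normals, plane_inds, planes, dl, rg_params);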
| 3,932 | 21.095506 | 133 |
h
|
XNNPACK
|
XNNPACK-master/bench/dconv.h
|
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <benchmark/benchmark.h>
#define BENCHMARK_DCONV(conv_fn) \
BENCHMARK_CAPTURE(conv_fn, mobilenet_v1, "MobileNet v1/v2")->Apply(MobileNetConvArguments)->UseRealTime(); \
BENCHMARK_CAPTURE(conv_fn, mobilenet_v3, "MobileNet v3")->Apply(MobileNetV3ConvArguments)->UseRealTime(); \
BENCHMARK_CAPTURE(conv_fn, shufflenet, "ShuffleNet v1/v2")->Apply(ShuffleNetConvArguments)->UseRealTime(); \
BENCHMARK_CAPTURE(conv_fn, squeezenet_v11, "SqueezeNet 1.1")->Apply(SqueezeNetV11ConvArguments)->UseRealTime();
// ShuffleNet v1/v2.
static void ShuffleNetConvArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"H", "W", "Cout"});
/********* Conv 1 ********/
/* H W GCout */
b->Args({224, 224, 24});
}
// MobileNet v1/v2.
static void MobileNetConvArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"H", "W", "Cout"});
/* H W GCout */
b->Args({224, 224, 32});
}
// MobileNet v3 Small/Large.
static void MobileNetV3ConvArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"H", "W", "Cout"});
/******************* Initial Stage *******************/
/* H W GCout */
b->Args({224, 224, 16});
}
// SqueezeNet 1.1
static void SqueezeNetV11ConvArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"H", "W", "GCout"});
/*********************** Conv 1 **********************/
/* H W GCout */
b->Args({224, 224, 64});
}
| 1,697 | 29.872727 | 113 |
h
|
XNNPACK
|
XNNPACK-master/bench/dwconv.h
|
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <benchmark/benchmark.h>
#define BENCHMARK_DWCONV(dwconv_fn) \
BENCHMARK_CAPTURE(dwconv_fn, mobilenet_v1, "MobileNet v1")->Apply(MobileNetV1DWConvArguments)->UseRealTime(); \
BENCHMARK_CAPTURE(dwconv_fn, mobilenet_v2, "MobileNet v2")->Apply(MobileNetV2DWConvArguments)->UseRealTime(); \
BENCHMARK_CAPTURE(dwconv_fn, mobilenet_v3_small, "MobileNet v3 Small")->Apply(MobileNetV3SmallDWConvArguments)->UseRealTime(); \
BENCHMARK_CAPTURE(dwconv_fn, mobilenet_v3_large, "MobileNet v3 Large")->Apply(MobileNetV3LargeDWConvArguments)->UseRealTime(); \
BENCHMARK_CAPTURE(dwconv_fn, shufflenet_v1_g1, "ShuffleNet v1 (1 group)")->Apply(ShuffleNetV1G1DWConvArguments)->UseRealTime(); \
BENCHMARK_CAPTURE(dwconv_fn, shufflenet_v1_g2, "ShuffleNet v1 (2 groups)")->Apply(ShuffleNetV1G2DWConvArguments)->UseRealTime(); \
BENCHMARK_CAPTURE(dwconv_fn, shufflenet_v1_g3, "ShuffleNet v1 (3 groups)")->Apply(ShuffleNetV1G3DWConvArguments)->UseRealTime(); \
BENCHMARK_CAPTURE(dwconv_fn, shufflenet_v1_g4, "ShuffleNet v1 (4 groups)")->Apply(ShuffleNetV1G4DWConvArguments)->UseRealTime(); \
BENCHMARK_CAPTURE(dwconv_fn, shufflenet_v1_g8, "ShuffleNet v1 (8 groups)")->Apply(ShuffleNetV1G8DWConvArguments)->UseRealTime(); \
BENCHMARK_CAPTURE(dwconv_fn, shufflenet_v2_x05, "ShuffleNet v2 0.5X")->Apply(ShuffleNetV2X05DWConvArguments)->UseRealTime(); \
BENCHMARK_CAPTURE(dwconv_fn, shufflenet_v2_x10, "ShuffleNet v2 1.0X")->Apply(ShuffleNetV2X10DWConvArguments)->UseRealTime(); \
BENCHMARK_CAPTURE(dwconv_fn, shufflenet_v2_x15, "ShuffleNet v2 1.5X")->Apply(ShuffleNetV2X15DWConvArguments)->UseRealTime(); \
BENCHMARK_CAPTURE(dwconv_fn, shufflenet_v2_x20, "ShuffleNet v2 2.0X")->Apply(ShuffleNetV2X20DWConvArguments)->UseRealTime();
// ShuffleNet v1 with 1 group.
static void ShuffleNetV1G1DWConvArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"H", "W", "KH", "KW", "PH", "PW", "S", "D", "G"});
/********* Stage 2: stride-2 unit *********/
/* H W KH KW PH PW S D G */
b->Args({56, 56, 3, 3, 2, 2, 2, 1, 36});
/********* Stage 2: stride-1 units ********/
/* H W KH KW PH PW S D G */
b->Args({28, 28, 3, 3, 2, 2, 2, 1, 36});
/********* Stage 3: stride-2 unit *********/
/* H W KH KW PH PW S D G */
b->Args({28, 28, 3, 3, 2, 2, 2, 1, 72});
/********* Stage 3: stride-1 units ********/
/* H W KH KW PH PW S D G */
b->Args({14, 14, 3, 3, 2, 2, 2, 1, 72});
/********* Stage 4: stride-2 unit *********/
/* H W KH KW PH PW S D G */
b->Args({14, 14, 3, 3, 2, 2, 2, 1, 144});
/********* Stage 4: stride-1 units ********/
/* H W KH KW PH PW S D G */
b->Args({ 7, 7, 3, 3, 2, 2, 2, 1, 144});
}
// ShuffleNet v1 with 2 groups.
static void ShuffleNetV1G2DWConvArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"H", "W", "KH", "KW", "PH", "PW", "S", "D", "G"});
/********* Stage 2: stride-2 unit *********/
/* H W KH KW PH PW S D G */
b->Args({56, 56, 3, 3, 2, 2, 2, 1, 50});
/********* Stage 2: stride-1 units ********/
/* H W KH KW PH PW S D G */
b->Args({28, 28, 3, 3, 2, 2, 2, 1, 50});
/********* Stage 3: stride-2 unit *********/
/* H W KH KW PH PW S D G */
b->Args({28, 28, 3, 3, 2, 2, 2, 1, 100});
/********* Stage 3: stride-1 units ********/
/* H W KH KW PH PW S D G */
b->Args({14, 14, 3, 3, 2, 2, 2, 1, 100});
/********* Stage 4: stride-2 unit *********/
/* H W KH KW PH PW S D G */
b->Args({14, 14, 3, 3, 2, 2, 2, 1, 200});
/********* Stage 4: stride-1 units ********/
/* H W KH KW PH PW S D G */
b->Args({ 7, 7, 3, 3, 2, 2, 2, 1, 200});
}
// ShuffleNet v1 with 3 groups.
static void ShuffleNetV1G3DWConvArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"H", "W", "KH", "KW", "PH", "PW", "S", "D", "G"});
/********* Stage 2: stride-2 unit **********/
/* H W KH KW PH PW S D G */
b->Args({56, 56, 3, 3, 2, 2, 2, 1, 60});
/********* Stage 2: stride-1 units *********/
/* H W KH KW PH PW S D G */
b->Args({28, 28, 3, 3, 2, 2, 2, 1, 60});
/********* Stage 3: stride-2 unit **********/
/* H W KH KW PH PW S D G */
b->Args({28, 28, 3, 3, 2, 2, 2, 1, 120});
/********* Stage 3: stride-1 units *********/
/* H W KH KW PH PW S D G */
b->Args({14, 14, 3, 3, 2, 2, 2, 1, 120});
/********* Stage 4: stride-2 unit **********/
/* H W KH KW PH PW S D G */
b->Args({14, 14, 3, 3, 2, 2, 2, 1, 240});
/********* Stage 4: stride-1 units *********/
/* H W KH KW PH PW S D G */
b->Args({ 7, 7, 3, 3, 2, 2, 2, 1, 240});
}
// ShuffleNet v1 with 4 groups.
static void ShuffleNetV1G4DWConvArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"H", "W", "KH", "KW", "PH", "PW", "S", "D", "G"});
/********* Stage 2: stride-2 unit *********/
/* H W KH KW PH PW S D G */
b->Args({56, 56, 3, 3, 2, 2, 2, 1, 68});
/********* Stage 2: stride-1 units ********/
/* H W KH KW PH PW S D G */
b->Args({28, 28, 3, 3, 2, 2, 2, 1, 68});
/********* Stage 3: stride-2 unit *********/
/* H W KH KW PH PW S D G */
b->Args({28, 28, 3, 3, 2, 2, 2, 1, 136});
/********* Stage 3: stride-1 units ********/
/* H W KH KW PH PW S D G */
b->Args({14, 14, 3, 3, 2, 2, 2, 1, 136});
/********* Stage 4: stride-2 unit *********/
/* H W KH KW PH PW S D G */
b->Args({14, 14, 3, 3, 2, 2, 2, 1, 272});
/********* Stage 4: stride-1 units ********/
/* H W KH KW PH PW S D G */
b->Args({ 7, 7, 3, 3, 2, 2, 2, 1, 272});
}
// ShuffleNet v1 with 8 groups.
static void ShuffleNetV1G8DWConvArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"H", "W", "KH", "KW", "PH", "PW", "S", "D", "G"});
/********* Stage 2: stride-2 unit *********/
/* H W KH KW PH PW S D G */
b->Args({56, 56, 3, 3, 2, 2, 2, 1, 96});
/********* Stage 2: stride-1 units ********/
/* H W KH KW PH PW S D G */
b->Args({28, 28, 3, 3, 2, 2, 2, 1, 96});
/********* Stage 3: stride-2 unit *********/
/* H W KH KW PH PW S D G */
b->Args({28, 28, 3, 3, 2, 2, 2, 1, 192});
/********* Stage 3: stride-1 units ********/
/* H W KH KW PH PW S D G */
b->Args({14, 14, 3, 3, 2, 2, 2, 1, 192});
/********* Stage 4: stride-2 unit *********/
/* H W KH KW PH PW S D G */
b->Args({14, 14, 3, 3, 2, 2, 2, 1, 384});
/********* Stage 4: stride-1 units ********/
/* H W KH KW PH PW S D G */
b->Args({ 7, 7, 3, 3, 2, 2, 2, 1, 384});
}
// ShuffleNet v2 (0.5X scale)
static void ShuffleNetV2X05DWConvArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"H", "W", "KH", "KW", "PH", "PW", "S", "D", "G"});
/**************** Stage 2 *****************/
/* H W KH KW PH PW S D G */
b->Args({56, 56, 3, 3, 2, 2, 2, 1, 24});
b->Args({28, 28, 3, 3, 2, 2, 1, 1, 24});
/**************** Stage 3 *****************/
/* H W KH KW PH PW S D G */
b->Args({28, 28, 3, 3, 2, 2, 2, 1, 48});
b->Args({14, 14, 3, 3, 2, 2, 1, 1, 48});
/**************** Stage 4 *****************/
/* H W KH KW PH PW S D G */
b->Args({14, 14, 3, 3, 2, 2, 2, 1, 96});
b->Args({ 7, 7, 3, 3, 2, 2, 1, 1, 96});
}
// ShuffleNet v2 (1.0X scale)
static void ShuffleNetV2X10DWConvArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"H", "W", "KH", "KW", "PH", "PW", "S", "D", "G"});
/**************** Stage 2 *****************/
/* H W KH KW PH PW S D G */
b->Args({56, 56, 3, 3, 2, 2, 2, 1, 24});
b->Args({56, 56, 3, 3, 2, 2, 2, 1, 58});
b->Args({28, 28, 3, 3, 2, 2, 1, 1, 58});
/**************** Stage 3 *****************/
/* H W KH KW PH PW S D G */
b->Args({28, 28, 3, 3, 2, 2, 2, 1, 116});
b->Args({14, 14, 3, 3, 2, 2, 1, 1, 116});
/**************** Stage 4 *****************/
/* H W KH KW PH PW S D G */
b->Args({14, 14, 3, 3, 2, 2, 2, 1, 232});
b->Args({ 7, 7, 3, 3, 2, 2, 1, 1, 232});
}
// ShuffleNet v2 (1.5X scale)
static void ShuffleNetV2X15DWConvArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"H", "W", "KH", "KW", "PH", "PW", "S", "D", "G"});
/**************** Stage 2 *****************/
/* H W KH KW PH PW S D G */
b->Args({56, 56, 3, 3, 2, 2, 2, 1, 24});
b->Args({56, 56, 3, 3, 2, 2, 2, 1, 88});
b->Args({28, 28, 3, 3, 2, 2, 1, 1, 88});
/**************** Stage 3 *****************/
/* H W KH KW PH PW S D G */
b->Args({28, 28, 3, 3, 2, 2, 2, 1, 176});
b->Args({14, 14, 3, 3, 2, 2, 1, 1, 176});
/**************** Stage 4 *****************/
/* H W KH KW PH PW S D G */
b->Args({14, 14, 3, 3, 2, 2, 2, 1, 352});
b->Args({ 7, 7, 3, 3, 2, 2, 1, 1, 352});
}
// ShuffleNet v2 (2.0X scale)
static void ShuffleNetV2X20DWConvArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"H", "W", "KH", "KW", "PH", "PW", "S", "D", "G"});
/***************** Stage 2 ****************/
/* H W KH KW PH PW S D G */
b->Args({56, 56, 3, 3, 2, 2, 2, 1, 24});
b->Args({56, 56, 3, 3, 2, 2, 2, 1, 122});
b->Args({28, 28, 3, 3, 2, 2, 1, 1, 122});
/***************** Stage 3 ****************/
/* H W KH KW PH PW S D G */
b->Args({28, 28, 3, 3, 2, 2, 2, 1, 244});
b->Args({14, 14, 3, 3, 2, 2, 1, 1, 244});
/***************** Stage 4 ****************/
/* H W KH KW PH PW S D G */
b->Args({14, 14, 3, 3, 2, 2, 2, 1, 488});
b->Args({ 7, 7, 3, 3, 2, 2, 1, 1, 488});
}
static void MobileNetV1DWConvArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"H", "W", "KH", "KW", "PH", "PW", "S", "D", "G"});
/* H W KH KW PH PW S D G */
b->Args({112, 112, 3, 3, 2, 2, 1, 1, 32});
b->Args({112, 112, 3, 3, 2, 2, 2, 1, 64});
b->Args({ 56, 56, 3, 3, 2, 2, 1, 1, 128});
b->Args({ 56, 56, 3, 3, 2, 2, 2, 1, 128});
b->Args({ 28, 28, 3, 3, 2, 2, 1, 1, 256});
b->Args({ 28, 28, 3, 3, 2, 2, 2, 1, 256});
b->Args({ 14, 14, 3, 3, 2, 2, 1, 1, 512});
b->Args({ 14, 14, 3, 3, 2, 2, 2, 1, 512});
b->Args({ 7, 7, 3, 3, 2, 2, 1, 1, 1024});
}
static void MobileNetV2DWConvArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"H", "W", "KH", "KW", "PH", "PW", "S", "D", "G"});
/**************** Bottleneck 1 ***************/
/* H W KH KW PH PW S D G */
b->Args({112, 112, 3, 3, 2, 2, 1, 1, 32});
/**************** Bottleneck 2 ***************/
/* H W KH KW PH PW S D G */
b->Args({112, 112, 3, 3, 2, 2, 2, 1, 96});
b->Args({ 56, 56, 3, 3, 2, 2, 1, 1, 144});
/**************** Bottleneck 3 ***************/
/* H W KH KW PH PW S D G */
b->Args({ 56, 56, 3, 3, 2, 2, 2, 1, 144});
b->Args({ 28, 28, 3, 3, 2, 2, 1, 1, 192});
//b->Args({ 28, 28, 3, 3, 2, 2, 1, 1, 192});
/**************** Bottleneck 4 ***************/
/* H W KH KW PH PW S D G */
b->Args({ 28, 28, 3, 3, 2, 2, 2, 1, 192});
b->Args({ 14, 14, 3, 3, 2, 2, 1, 1, 384});
//b->Args({ 14, 14, 3, 3, 2, 2, 1, 1, 384});
//b->Args({ 14, 14, 3, 3, 2, 2, 1, 1, 384});
/**************** Bottleneck 5 ***************/
/* H W KH KW PH PW S D G */
//b->Args({ 14, 14, 3, 3, 2, 2, 1, 1, 384});
b->Args({ 14, 14, 3, 3, 2, 2, 1, 1, 576});
//b->Args({ 14, 14, 3, 3, 2, 2, 1, 1, 576});
/**************** Bottleneck 6 ***************/
/* H W KH KW PH PW S D G */
b->Args({ 14, 14, 3, 3, 2, 2, 2, 1, 576});
b->Args({ 7, 7, 3, 3, 2, 2, 1, 1, 960});
//b->Args({ 7, 7, 3, 3, 2, 2, 1, 1, 960});
/**************** Bottleneck 7 ***************/
/* H W KH KW PH PW S D G */
//b->Args({ 7, 7, 3, 3, 2, 2, 1, 1, 960});
}
static void MobileNetV3SmallDWConvArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"H", "W", "KH", "KW", "PH", "PW", "S", "D", "G"});
/*************** Bottleneck 1 ***************/
/* H W KH KW PH PW S D G */
b->Args({112, 112, 3, 3, 2, 2, 2, 1, 16});
/*************** Bottleneck 2 ***************/
/* H W KH KW PH PW S D G */
b->Args({ 56, 56, 3, 3, 2, 2, 2, 1, 72});
/*************** Bottleneck 3 ***************/
/* H W KH KW PH PW S D G */
b->Args({ 28, 28, 3, 3, 2, 2, 1, 1, 88});
/*************** Bottleneck 4 ***************/
/* H W KH KW PH PW S D G */
b->Args({ 28, 28, 5, 5, 4, 4, 2, 1, 96});
/*************** Bottleneck 5 ***************/
/* H W KH KW PH PW S D G */
b->Args({ 14, 14, 5, 5, 4, 4, 1, 1, 240});
/*************** Bottleneck 6 ***************/
/* H W KH KW PH PW S D G */
//b->Args({ 14, 14, 5, 5, 4, 4, 1, 1, 240});
/*************** Bottleneck 7 ***************/
/* H W KH KW PH PW S D G */
b->Args({ 14, 14, 5, 5, 4, 4, 1, 1, 120});
/*************** Bottleneck 8 ***************/
/* H W KH KW PH PW S D G */
b->Args({ 14, 14, 5, 5, 4, 4, 1, 1, 144});
/*************** Bottleneck 9 ***************/
/* H W KH KW PH PW S D G */
b->Args({ 14, 14, 5, 5, 4, 4, 2, 1, 288});
/*************** Bottleneck 10 **************/
/* H W KH KW PH PW S D G */
b->Args({ 7, 7, 5, 5, 4, 4, 1, 1, 576});
/*************** Bottleneck 11 **************/
/* H W KH KW PH PW S D G */
//b->Args({ 7, 7, 5, 5, 4, 4, 1, 1, 576});
}
static void MobileNetV3LargeDWConvArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"H", "W", "KH", "KW", "PH", "PW", "S", "D", "G"});
/*************** Bottleneck 1 ***************/
/* H W KH KW PH PW S D G */
b->Args({112, 112, 3, 3, 2, 2, 1, 1, 16});
/*************** Bottleneck 2 ***************/
/* H W KH KW PH PW S D G */
b->Args({112, 112, 3, 3, 2, 2, 2, 1, 64});
/*************** Bottleneck 3 ***************/
/* H W KH KW PH PW S D G */
b->Args({ 56, 56, 3, 3, 2, 2, 1, 1, 72});
/*************** Bottleneck 4 ***************/
/* H W KH KW PH PW S D G */
b->Args({ 56, 56, 5, 5, 4, 4, 2, 1, 72});
/*************** Bottleneck 5 ***************/
/* H W KH KW PH PW S D G */
b->Args({ 28, 28, 5, 5, 4, 4, 1, 1, 120});
/*************** Bottleneck 6 ***************/
/* H W KH KW PH PW S D G */
//b->Args({ 28, 28, 5, 5, 4, 4, 1, 1, 120});
/*************** Bottleneck 7 ***************/
/* H W KH KW PH PW S D G */
b->Args({ 28, 28, 3, 3, 2, 2, 2, 1, 240});
/*************** Bottleneck 8 ***************/
/* H W KH KW PH PW S D G */
b->Args({ 14, 14, 3, 3, 2, 2, 1, 1, 200});
/*************** Bottleneck 9 ***************/
/* H W KH KW PH PW S D G */
b->Args({ 14, 14, 3, 3, 2, 2, 1, 1, 184});
/*************** Bottleneck 10 **************/
/* H W KH KW PH PW S D G */
//b->Args({ 14, 14, 3, 3, 2, 2, 1, 1, 184});
/*************** Bottleneck 11 **************/
/* H W KH KW PH PW S D G */
b->Args({ 14, 14, 3, 3, 2, 2, 1, 1, 480});
/*************** Bottleneck 12 **************/
/* H W KH KW PH PW S D G */
b->Args({ 14, 14, 3, 3, 2, 2, 1, 1, 672});
/*************** Bottleneck 13 **************/
/* H W KH KW PH PW S D G */
b->Args({ 14, 14, 5, 5, 4, 4, 2, 1, 672});
/*************** Bottleneck 14 **************/
/* H W KH KW PH PW S D G */
b->Args({ 7, 7, 5, 5, 4, 4, 1, 1, 960});
/*************** Bottleneck 15 **************/
/* H W KH KW PH PW S D G */
//b->Args({ 7, 7, 5, 5, 4, 4, 1, 1, 960});
}
| 16,821 | 44.588076 | 132 |
h
|
XNNPACK
|
XNNPACK-master/bench/spmm.h
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <benchmark/benchmark.h>
#define BENCHMARK_SPMM(spmm_fn) \
BENCHMARK_CAPTURE(spmm_fn, mobilenet_v1, "MobileNet v1")->Apply(MobileNetV1SpmmArguments)->UseRealTime(); \
BENCHMARK_CAPTURE(spmm_fn, mobilenet_v2, "MobileNet v2")->Apply(MobileNetV2SpmmArguments)->UseRealTime(); \
BENCHMARK_CAPTURE(spmm_fn, mobilenet_v3_small, "MobileNet v3 Small")->Apply(MobileNetV3SmallSpmmArguments)->UseRealTime(); \
BENCHMARK_CAPTURE(spmm_fn, mobilenet_v3_large, "MobileNet v3 Large")->Apply(MobileNetV3LargeSpmmArguments)->UseRealTime(); \
BENCHMARK_CAPTURE(spmm_fn, shufflenet_v1_g1, "ShuffleNet v1 (1 group)")->Apply(ShuffleNetV1G1SpmmArguments)->UseRealTime(); \
BENCHMARK_CAPTURE(spmm_fn, shufflenet_v1_g2, "ShuffleNet v1 (2 groups)")->Apply(ShuffleNetV1G2SpmmArguments)->UseRealTime(); \
BENCHMARK_CAPTURE(spmm_fn, shufflenet_v1_g3, "ShuffleNet v1 (3 groups)")->Apply(ShuffleNetV1G3SpmmArguments)->UseRealTime(); \
BENCHMARK_CAPTURE(spmm_fn, shufflenet_v1_g4, "ShuffleNet v1 (4 groups)")->Apply(ShuffleNetV1G4SpmmArguments)->UseRealTime(); \
BENCHMARK_CAPTURE(spmm_fn, shufflenet_v1_g8, "ShuffleNet v1 (8 groups)")->Apply(ShuffleNetV1G8SpmmArguments)->UseRealTime(); \
BENCHMARK_CAPTURE(spmm_fn, shufflenet_v2_x05, "ShuffleNet v2 0.5X")->Apply(ShuffleNetV2X05SpmmArguments)->UseRealTime(); \
BENCHMARK_CAPTURE(spmm_fn, shufflenet_v2_x10, "ShuffleNet v2 1.0X")->Apply(ShuffleNetV2X10SpmmArguments)->UseRealTime(); \
BENCHMARK_CAPTURE(spmm_fn, shufflenet_v2_x15, "ShuffleNet v2 1.5X")->Apply(ShuffleNetV2X15SpmmArguments)->UseRealTime(); \
BENCHMARK_CAPTURE(spmm_fn, shufflenet_v2_x20, "ShuffleNet v2 2.0X")->Apply(ShuffleNetV2X20SpmmArguments)->UseRealTime();
// ShuffleNet v1 with 1 group.
static void ShuffleNetV1G1SpmmArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"M", "N", "K"});
/* M N K */
b->Args({56 * 56, 36, 24});
b->Args({28 * 28, 120, 36});
b->Args({28 * 28, 36, 144});
b->Args({28 * 28, 144, 36});
b->Args({28 * 28, 72, 144});
b->Args({14 * 14, 144, 72});
b->Args({14 * 14, 72, 288});
b->Args({14 * 14, 288, 72});
b->Args({14 * 14, 144, 288});
b->Args({ 7 * 7, 288, 144});
b->Args({ 7 * 7, 144, 576});
b->Args({ 7 * 7, 576, 144});
}
// ShuffleNet v1 with 2 groups.
static void ShuffleNetV1G2SpmmArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"M", "N", "K"});
/* M N K */
b->Args({56 * 56, 50, 24});
b->Args({28 * 28, 88, 25});
b->Args({28 * 28, 25, 100});
b->Args({28 * 28, 100, 25});
b->Args({28 * 28, 50, 100});
b->Args({14 * 14, 100, 50});
b->Args({14 * 14, 50, 200});
b->Args({14 * 14, 200, 50});
b->Args({14 * 14, 100, 200});
b->Args({ 7 * 7, 200, 100});
b->Args({ 7 * 7, 100, 400});
b->Args({ 7 * 7, 400, 100});
}
// ShuffleNet v1 with 3 groups.
static void ShuffleNetV1G3SpmmArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"M", "N", "K"});
/* M N K */
b->Args({56 * 56, 60, 24});
b->Args({28 * 28, 72, 20});
b->Args({28 * 28, 20, 80});
b->Args({28 * 28, 80, 20});
b->Args({28 * 28, 40, 80});
b->Args({14 * 14, 80, 40});
b->Args({14 * 14, 40, 160});
b->Args({14 * 14, 160, 40});
b->Args({14 * 14, 80, 160});
b->Args({ 7 * 7, 160, 80});
b->Args({ 7 * 7, 80, 320});
b->Args({ 7 * 7, 320, 80});
}
// ShuffleNet v1 with 4 groups.
static void ShuffleNetV1G4SpmmArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"M", "N", "K"});
/* M N K */
b->Args({56 * 56, 68, 24});
b->Args({28 * 28, 62, 17});
b->Args({28 * 28, 17, 68});
b->Args({28 * 28, 68, 17});
b->Args({28 * 28, 34, 68});
b->Args({14 * 14, 68, 34});
b->Args({14 * 14, 34, 136});
b->Args({14 * 14, 136, 34});
b->Args({14 * 14, 68, 136});
b->Args({ 7 * 7, 136, 68});
b->Args({ 7 * 7, 68, 272});
b->Args({ 7 * 7, 272, 68});
}
// ShuffleNet v1 with 8 groups.
static void ShuffleNetV1G8SpmmArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"M", "N", "K"});
/* M N K */
b->Args({56 * 56, 96, 24});
b->Args({28 * 28, 45, 12});
b->Args({28 * 28, 12, 48});
b->Args({28 * 28, 48, 12});
b->Args({28 * 28, 24, 48});
b->Args({14 * 14, 48, 24});
b->Args({14 * 14, 24, 96});
b->Args({14 * 14, 96, 24});
b->Args({14 * 14, 48, 96});
b->Args({ 7 * 7, 96, 48});
b->Args({ 7 * 7, 48, 192});
b->Args({ 7 * 7, 192, 48});
}
// ShuffleNet v2 (0.5X scale)
static void ShuffleNetV2X05SpmmArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"M", "N", "K"});
/* M N K */
b->Args({56 * 56, 24, 24});
b->Args({28 * 28, 24, 24});
b->Args({28 * 28, 48, 48});
b->Args({14 * 14, 48, 48});
b->Args({14 * 14, 96, 96});
b->Args({ 7 * 7, 96, 96});
b->Args({ 7 * 7, 1024, 192});
}
// ShuffleNet v2 (1.0X scale)
static void ShuffleNetV2X10SpmmArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"M", "N", "K"});
/* M N K */
b->Args({56 * 56, 58, 24});
b->Args({28 * 28, 58, 24});
b->Args({28 * 28, 58, 58});
b->Args({14 * 14, 116, 116});
b->Args({14 * 14, 116, 116});
b->Args({14 * 14, 232, 232});
b->Args({ 7 * 7, 232, 232});
b->Args({ 7 * 7, 1024, 464});
}
// ShuffleNet v2 (1.5X scale)
static void ShuffleNetV2X15SpmmArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"M", "N", "K"});
/* M N K */
b->Args({56 * 56, 88, 24});
b->Args({28 * 28, 88, 24});
b->Args({28 * 28, 88, 88});
b->Args({28 * 28, 176, 176});
b->Args({14 * 14, 176, 176});
b->Args({14 * 14, 352, 352});
b->Args({ 7 * 7, 352, 352});
b->Args({ 7 * 7, 1024, 704});
}
// ShuffleNet v2 (2.0X scale)
static void ShuffleNetV2X20SpmmArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"M", "N", "K"});
/* M N K */
b->Args({56 * 56, 122, 24});
b->Args({28 * 28, 122, 24});
b->Args({28 * 28, 122, 122});
b->Args({28 * 28, 244, 244});
b->Args({14 * 14, 244, 244});
b->Args({14 * 14, 488, 488});
b->Args({ 7 * 7, 488, 488});
b->Args({ 7 * 7, 2048, 976});
}
static void MobileNetV1SpmmArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"M", "N", "K"});
/* M N K */
b->Args({112 * 112, 64, 32});
b->Args({ 56 * 56, 128, 64});
b->Args({ 56 * 56, 128, 128});
b->Args({ 28 * 28, 256, 128});
b->Args({ 28 * 28, 256, 256});
b->Args({ 14 * 14, 512, 256});
b->Args({ 14 * 14, 512, 512});
b->Args({ 7 * 7, 1024, 512});
b->Args({ 7 * 7, 1024, 1024});
}
static void MobileNetV2SpmmArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"M", "N", "K"});
/******** Bottleneck 1 *******/
/* M N K */
b->Args({112 * 112, 16, 32});
/******** Bottleneck 2 *******/
/* M N K */
b->Args({112 * 112, 96, 16});
b->Args({ 56 * 56, 24, 96});
b->Args({ 56 * 56, 144, 24});
b->Args({ 56 * 56, 24, 144});
/******** Bottleneck 3 *******/
/* M N K */
b->Args({ 28 * 28, 32, 144});
b->Args({ 28 * 28, 192, 32});
b->Args({ 28 * 28, 32, 192});
/******** Bottleneck 4 *******/
/* M N K */
b->Args({ 14 * 14, 64, 192});
b->Args({ 14 * 14, 384, 64});
b->Args({ 14 * 14, 64, 384});
/******** Bottleneck 5 *******/
/* M N K */
b->Args({ 14 * 14, 96, 384});
b->Args({ 14 * 14, 576, 96});
b->Args({ 14 * 14, 96, 576});
/******** Bottleneck 6 *******/
/* M N K */
b->Args({ 7 * 7, 160, 576});
b->Args({ 7 * 7, 960, 160});
b->Args({ 7 * 7, 160, 960});
/******** Bottleneck 7 *******/
/* M N K */
b->Args({ 7 * 7, 320, 960});
/***** Pre-pooling Conv2D ****/
/* M N K */
b->Args({ 7 * 7, 1280, 320});
}
static void MobileNetV3SmallSpmmArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"M", "N", "K"});
/****** Bottleneck 1 ******/
/* M N K */
b->Args({ 1 * 1, 8, 16});
b->Args({ 1 * 1, 16, 8});
b->Args({56 * 56, 16, 16});
/****** Bottleneck 2 ******/
/* M N K */
b->Args({56 * 56, 72, 16});
b->Args({28 * 28, 24, 72});
/****** Bottleneck 3 ******/
/* M N K */
b->Args({28 * 28, 88, 24});
b->Args({28 * 28, 24, 88});
/****** Bottleneck 4 ******/
/* M N K */
b->Args({28 * 28, 96, 24});
b->Args({ 1 * 1, 24, 96});
b->Args({ 1 * 1, 96, 24});
b->Args({14 * 14, 40, 96});
/****** Bottleneck 5 ******/
/* M N K */
b->Args({14 * 14, 240, 40});
b->Args({ 1 * 1, 64, 240});
b->Args({ 1 * 1, 240, 64});
b->Args({14 * 14, 40, 240});
/****** Bottleneck 6 ******/
/* M N K */
//b->Args({14 * 14, 240, 40});
//b->Args({ 1 * 1, 64, 240});
//b->Args({ 1 * 1, 240, 64});
//b->Args({14 * 14, 40, 240});
/****** Bottleneck 7 ******/
/* M N K */
b->Args({14 * 14, 120, 40});
b->Args({ 1 * 1, 32, 120});
b->Args({ 1 * 1, 120, 32});
b->Args({14 * 14, 48, 120});
/****** Bottleneck 8 ******/
/* M N K */
b->Args({14 * 14, 144, 48});
b->Args({ 1 * 1, 40, 144});
b->Args({ 1 * 1, 144, 40});
b->Args({14 * 14, 48, 144});
/****** Bottleneck 9 ******/
/* M N K */
b->Args({14 * 14, 288, 48});
b->Args({ 1 * 1, 72, 288});
b->Args({ 1 * 1, 288, 72});
b->Args({ 7 * 7, 96, 288});
/****** Bottleneck 10 *****/
/* M N K */
b->Args({ 7 * 7, 576, 96});
b->Args({ 1 * 1, 144, 576});
b->Args({ 1 * 1, 576, 144});
b->Args({ 7 * 7, 96, 576});
/****** Bottleneck 11 *****/
/* M N K */
//b->Args({ 7 * 7, 576, 96});
//b->Args({ 1 * 1, 144, 576});
//b->Args({ 1 * 1, 576, 144});
//b->Args({ 7 * 7, 96, 576});
/******* Last Stage *******/
/* M N K */
//b->Args({ 7 * 7, 576, 96});
}
static void MobileNetV3LargeSpmmArguments(benchmark::internal::Benchmark* b) {
b->ArgNames({"M", "N", "K"});
/******* Bottleneck 1 *******/
/* M N K */
b->Args({112 * 112, 16, 16});
/******* Bottleneck 2 *******/
/* M N K */
b->Args({112 * 112, 64, 16});
b->Args({ 56 * 56, 24, 64});
/******* Bottleneck 3 *******/
/* M N K */
b->Args({ 56 * 56, 72, 24});
b->Args({ 56 * 56, 24, 72});
/******* Bottleneck 4 *******/
/* M N K */
//b->Args({ 56 * 56, 72, 24});
b->Args({ 1 * 1, 24, 72});
b->Args({ 1 * 1, 72, 24});
b->Args({ 28 * 28, 40, 72});
/******* Bottleneck 5 *******/
/* M N K */
b->Args({ 28 * 28, 120, 40});
b->Args({ 1 * 1, 32, 120});
b->Args({ 1 * 1, 120, 32});
b->Args({ 28 * 28, 40, 120});
/******* Bottleneck 6 *******/
/* M N K */
//b->Args({ 28 * 28, 120, 40});
//b->Args({ 1 * 1, 32, 120});
//b->Args({ 1 * 1, 120, 32});
//b->Args({ 28 * 28, 40, 120});
/******* Bottleneck 7 *******/
/* M N K */
b->Args({ 28 * 28, 240, 40});
b->Args({ 14 * 14, 80, 240});
/******* Bottleneck 8 *******/
/* M N K */
b->Args({ 14 * 14, 200, 80});
b->Args({ 14 * 14, 80, 200});
/******* Bottleneck 9 *******/
/* M N K */
b->Args({ 14 * 14, 184, 80});
b->Args({ 14 * 14, 80, 184});
/******* Bottleneck 10 ******/
/* M N K */
b->Args({ 14 * 14, 184, 80});
b->Args({ 14 * 14, 80, 184});
/******* Bottleneck 11 ******/
/* M N K */
b->Args({ 14 * 14, 480, 80});
b->Args({ 1 * 1, 120, 480});
b->Args({ 1 * 1, 480, 120});
b->Args({ 14 * 14, 112, 480});
/******* Bottleneck 12 ******/
/* M N K */
b->Args({ 14 * 14, 672, 112});
b->Args({ 1 * 1, 168, 672});
b->Args({ 1 * 1, 672, 168});
b->Args({ 14 * 14, 112, 672});
/******* Bottleneck 13 ******/
/* M N K */
//b->Args({ 14 * 14, 672, 112});
//b->Args({ 1 * 1, 168, 672});
//b->Args({ 1 * 1, 672, 168});
b->Args({ 7 * 7, 160, 672});
/******* Bottleneck 14 ******/
/* M N K */
b->Args({ 7 * 7, 960, 160});
b->Args({ 1 * 1, 240, 960});
b->Args({ 1 * 1, 960, 240});
b->Args({ 7 * 7, 160, 960});
/******* Bottleneck 15 ******/
/* M N K */
//b->Args({ 7 * 7, 960, 160});
//b->Args({ 1 * 1, 240, 960});
//b->Args({ 1 * 1, 960, 240});
//b->Args({ 7 * 7, 160, 960});
/******** Last Stage *******/
/* M N K */
//b->Args({ 7 * 7, 960, 160});
}
| 13,053 | 32.818653 | 128 |
h
|
XNNPACK
|
XNNPACK-master/bench/utils.h
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <cstddef>
#include <cstdint>
#include <xnnpack.h>
#include <xnnpack/common.h>
#include <xnnpack/memory.h>
#include <benchmark/benchmark.h>
namespace benchmark {
namespace utils {
uint32_t WipeCache();
uint32_t PrefetchToL1(const void* ptr, size_t size);
// Disable support for denormalized numbers in floating-point units.
void DisableDenormals();
// Return clock rate, in Hz, for the currently used logical processor.
uint64_t GetCurrentCpuFrequency();
// Return maximum (across all cores/clusters/sockets) last level cache size.
// Can overestimate, but not underestimate LLC size.
size_t GetMaxCacheSize();
// Set number of elements for a reduction microkernel such that:
// - It is divisible by 2, 3, 4, 5, 6.
// - It is divisible by AVX512 width.
// - Total memory footprint does not exceed the characteristic cache size for
// the architecture.
template<class InType>
void ReductionParameters(benchmark::internal::Benchmark* benchmark) {
benchmark->ArgName("N");
size_t characteristic_l1 = 32 * 1024;
size_t characteristic_l2 = 256 * 1024;
#if XNN_ARCH_ARM
characteristic_l1 = 16 * 1024;
characteristic_l2 = 128 * 1024;
#endif // XNN_ARCH_ARM
const size_t elementwise_size = sizeof(InType);
benchmark->Arg(characteristic_l1 / elementwise_size / 960 * 960);
benchmark->Arg(characteristic_l2 / elementwise_size / 960 * 960);
}
// Set number of elements for a unary elementwise microkernel such that:
// - It is divisible by 2, 3, 4, 5, 6.
// - It is divisible by AVX512 width.
// - Total memory footprint does not exceed the characteristic cache size for
// the architecture.
template<class InType, class OutType>
void UnaryElementwiseParameters(benchmark::internal::Benchmark* benchmark) {
benchmark->ArgName("N");
size_t characteristic_l1 = 32 * 1024;
size_t characteristic_l2 = 256 * 1024;
#if XNN_ARCH_ARM
characteristic_l1 = 16 * 1024;
characteristic_l2 = 128 * 1024;
#endif // XNN_ARCH_ARM
const size_t elementwise_size = sizeof(InType) + sizeof(OutType);
benchmark->Arg(characteristic_l1 / elementwise_size / 960 * 960);
benchmark->Arg(characteristic_l2 / elementwise_size / 960 * 960);
}
// Set number of elements for a binary elementwise microkernel such that:
// - It is divisible by 2, 3, 4, 5, 6.
// - It is divisible by AVX512 width.
// - Total memory footprint does not exceed the characteristic cache size for
// the architecture.
template<class InType, class OutType>
void BinaryElementwiseParameters(benchmark::internal::Benchmark* benchmark) {
benchmark->ArgName("N");
size_t characteristic_l1 = 32 * 1024;
size_t characteristic_l2 = 256 * 1024;
#if XNN_ARCH_ARM
characteristic_l1 = 16 * 1024;
characteristic_l2 = 128 * 1024;
#endif // XNN_ARCH_ARM
const size_t elementwise_size = 2 * sizeof(InType) + sizeof(OutType);
benchmark->Arg(characteristic_l1 / elementwise_size / 960 * 960);
benchmark->Arg(characteristic_l2 / elementwise_size / 960 * 960);
}
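// Usage sketch (illustrative only): these helpers are intended to be passed to
// Apply() when registering a benchmark; the benchmark function and ukernel
// names below are hypothetical.
//
//   BENCHMARK_CAPTURE(my_unary_bench, scalar, my_ukernel_fn)
//     ->Apply(benchmark::utils::UnaryElementwiseParameters<float, float>)
//     ->UseRealTime();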
// Set multi-threading parameters appropriate for the processor.
void MultiThreadingParameters(benchmark::internal::Benchmark* benchmark);
typedef bool (*IsaCheckFunction)(benchmark::State& state);
// Check if either ARM VFPv2 or VFPv3 extension is supported.
// If VFP is unsupported, report error in benchmark state, and return false.
bool CheckVFP(benchmark::State& state);
// Check if ARMv6 extensions are supported.
// If ARMv6 extensions are unsupported, report error in benchmark state, and return false.
bool CheckARMV6(benchmark::State& state);
// Check if ARM FP16-ARITH extension is supported.
// If FP16-ARITH is unsupported, report error in benchmark state, and return false.
bool CheckFP16ARITH(benchmark::State& state);
// Check if ARM NEON extension is supported.
// If NEON is unsupported, report error in benchmark state, and return false.
bool CheckNEON(benchmark::State& state);
// Check if ARM NEON-FP16 extension is supported.
// If NEON-FP16 is unsupported, report error in benchmark state, and return false.
bool CheckNEONFP16(benchmark::State& state);
// Check if ARM NEON-FMA extension is supported.
// If NEON-FMA is unsupported, report error in benchmark state, and return false.
bool CheckNEONFMA(benchmark::State& state);
// Check if ARMv8 NEON instructions are supported.
// If ARMv8 NEON is unsupported, report error in benchmark state, and return false.
bool CheckNEONV8(benchmark::State& state);
// Check if ARM NEON-FP16-ARITH extension is supported.
// If NEON-FP16-ARITH is unsupported, report error in benchmark state, and return false.
bool CheckNEONFP16ARITH(benchmark::State& state);
// Check if ARM NEON-BF16 extension is supported.
// If NEON-BF16 is unsupported, report error in benchmark state, and return false.
bool CheckNEONBF16(benchmark::State& state);
// Check if ARM DOT extension is supported.
// If DOT is unsupported, report error in benchmark state, and return false.
bool CheckNEONDOT(benchmark::State& state);
// Check if RISC-V V (vector) extension is supported.
// If V is unsupported, report error in benchmark state, and return false.
bool CheckRVV(benchmark::State& state);
// Check if x86 SSSE3 extension is supported.
// If SSSE3 is unsupported, report error in benchmark state, and return false.
bool CheckSSSE3(benchmark::State& state);
// Check if x86 SSE4.1 extension is supported.
// If SSE4.1 is unsupported, report error in benchmark state, and return false.
bool CheckSSE41(benchmark::State& state);
// Check if x86 AVX extension is supported.
// If AVX is unsupported, report error in benchmark state, and return false.
bool CheckAVX(benchmark::State& state);
// Check if x86 F16C extension is supported.
// If F16C is unsupported, report error in benchmark state, and return false.
bool CheckF16C(benchmark::State& state);
// Check if x86 XOP extension is supported.
// If XOP is unsupported, report error in benchmark state, and return false.
bool CheckXOP(benchmark::State& state);
// Check if x86 FMA3 extension is supported.
// If FMA3 is unsupported, report error in benchmark state, and return false.
bool CheckFMA3(benchmark::State& state);
// Check if x86 AVX2 extension is supported.
// If AVX2 is unsupported, report error in benchmark state, and return false.
bool CheckAVX2(benchmark::State& state);
// Check if x86 AVX512F extension is supported.
// If AVX512F is unsupported, report error in benchmark state, and return false.
bool CheckAVX512F(benchmark::State& state);
// Check if x86 SKX-level AVX512 extensions (AVX512F, AVX512CD, AVX512BW, AVX512DQ, and AVX512VL) are supported.
// If SKX-level AVX512 extensions are unsupported, report error in benchmark state, and return false.
bool CheckAVX512SKX(benchmark::State& state);
// Check if x86 VBMI + SKX-level AVX512 extensions (AVX512F, AVX512CD, AVX512BW, AVX512DQ, and AVX512VL) are supported.
// If VBMI or SKX-level AVX512 extensions are unsupported, report error in benchmark state, and return false.
bool CheckAVX512VBMI(benchmark::State& state);
// Check if PSHUFB instruction is available in WAsm Relaxed SIMD as Relaxed Swizzle.
// If WAsm PSHUFB is unsupported, report error in benchmark state, and return false.
bool CheckWAsmPSHUFB(benchmark::State& state);
// Check if SDOT instruction is available in WAsm Relaxed SIMD as Relaxed Integer Dot Product with Accumulation.
// If WAsm SDOT is unsupported, report error in benchmark state, and return false.
bool CheckWAsmSDOT(benchmark::State& state);
// Check if BLENDVPS instruction is available in WAsm Relaxed SIMD as Relaxed Lane Select.
// If WAsm BLENDVPS is unsupported, report error in benchmark state, and return false.
bool CheckWAsmBLENDVPS(benchmark::State& state);
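// Computes the integer ceiling of x / q.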
template <class T>
inline T DivideRoundUp(T x, T q) {
return x / q + T(x % q != 0);
}
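// Rounds x up to the nearest multiple of q.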
template <class T>
inline T RoundUp(T x, T q) {
return q * DivideRoundUp(x, q);
}
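// Difference-or-zero: returns a - b when a >= b, and 0 otherwise.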
template <class T>
inline T Doz(T a, T b) {
return a >= b ? a - b : T(0);
}
#if XNN_PLATFORM_JIT
// A struct that uses the RAII pattern to allocate and release code memory.
struct CodeMemoryHelper {
CodeMemoryHelper();
~CodeMemoryHelper();
xnn_code_buffer buffer;
xnn_status status;
};
#endif // XNN_PLATFORM_JIT
} // namespace utils
} // namespace benchmark
| 8,393 | 35.977974 | 119 |
h
|
XNNPACK
|
XNNPACK-master/eval/math-evaluation-tester.h
|
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <cmath>
#include <xnnpack/math-stubs.h>
class MathEvaluationTester {
public:
inline MathEvaluationTester& input_value(float value) {
this->input_min_ = value;
this->input_max_ = value;
return *this;
}
inline MathEvaluationTester& input_range(float lower_bound, float upper_bound) {
this->input_min_ = lower_bound;
this->input_max_ = upper_bound;
return *this;
}
inline float input_min() const {
return this->input_min_;
}
inline float input_max() const {
return this->input_max_;
}
void TestOutputMatchReference(xnn_f16_unary_math_fn math_fn, float output_value) const;
void TestOutputMatchReference(xnn_f32_unary_math_fn math_fn, float output_value) const;
void TestOutputMatchZero(xnn_f16_unary_math_fn math_fn) const;
void TestOutputMatchZero(xnn_f32_unary_math_fn math_fn) const;
void TestNaN(xnn_f16_unary_math_fn math_fn) const;
void TestNaN(xnn_f32_unary_math_fn math_fn) const;
private:
static constexpr int kBlockSize = 1024;
float input_min_ = std::nanf("");
float input_max_ = std::nanf("");
};
| 1,273 | 24.48 | 89 |
h
|
XNNPACK
|
XNNPACK-master/src/allocator.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdlib.h>
#ifdef __ANDROID__
#include <malloc.h>
#endif
#include <xnnpack/allocator.h>
#include <xnnpack/common.h>
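// posix_memalign is declared explicitly here, presumably so that it is visible
// even when the C library headers hide it behind feature-test macros.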
extern int posix_memalign(void **memptr, size_t alignment, size_t size);
static void* xnn_allocate(void* context, size_t size) {
return malloc(size);
}
static void* xnn_reallocate(void* context, void* pointer, size_t size) {
return realloc(pointer, size);
}
static void xnn_deallocate(void* context, void* pointer) {
if XNN_LIKELY(pointer != NULL) {
free(pointer);
}
}
static void* xnn_aligned_allocate(void* context, size_t alignment, size_t size) {
#if XNN_ARCH_WASM
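  // malloc on WebAssembly is assumed to provide at least 2 * sizeof(void*)
  // (8 bytes on wasm32) of alignment, so plain malloc suffices here.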
assert(alignment <= 2 * sizeof(void*));
return malloc(size);
#elif XNN_PLATFORM_ANDROID
return memalign(alignment, size);
#elif XNN_PLATFORM_WINDOWS
return _aligned_malloc(size, alignment);
#else
void* memory_ptr = NULL;
if (posix_memalign(&memory_ptr, alignment, size) != 0) {
return NULL;
}
return memory_ptr;
#endif
}
static void xnn_aligned_deallocate(void* context, void* pointer) {
if XNN_LIKELY(pointer != NULL) {
#if defined(_WIN32)
_aligned_free(pointer);
#else
free(pointer);
#endif
}
}
const struct xnn_allocator xnn_default_allocator = {
.allocate = xnn_allocate,
.reallocate = xnn_reallocate,
.deallocate = xnn_deallocate,
.aligned_allocate = xnn_aligned_allocate,
.aligned_deallocate = xnn_aligned_deallocate,
};
| 1,608 | 22.661765 | 81 |
c
|
XNNPACK
|
XNNPACK-master/src/argmaxpool-config.c
|
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <pthread.h>
#endif
#include <xnnpack/common.h>
#include <xnnpack/config.h>
#include <xnnpack/microparams-init.h>
#include <xnnpack/argmaxpool.h>
static struct xnn_argmaxpool_config f32_argmaxpool_config[XNN_MAX_F32_ARGMAXPOOL_UKERNELS] = {0};
#if XNN_PLATFORM_WINDOWS
static INIT_ONCE init_guard_f32_argmaxpool = INIT_ONCE_STATIC_INIT;
#else
static pthread_once_t init_guard_f32_argmaxpool = PTHREAD_ONCE_INIT;
#endif
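// Selects the f32 argmax-pooling microkernels for the current architecture and
// hardware features; invoked at most once through the once-guard above.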
static void init_f32_argmaxpool_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
f32_argmaxpool_config[0] = (struct xnn_argmaxpool_config) {
.up = (xnn_argmaxpool_unipass_ukernel_fn) xnn_f32_argmaxpool_ukernel_4x__neon_c4,
.first_pass_tile_size = 4,
};
f32_argmaxpool_config[1] = (struct xnn_argmaxpool_config) {
.up = (xnn_argmaxpool_unipass_ukernel_fn) xnn_f32_argmaxpool_ukernel_9x__neon_c4,
.first_pass_tile_size = 9,
};
f32_argmaxpool_config[2] = (struct xnn_argmaxpool_config) {
.mp = (xnn_argmaxpool_multipass_ukernel_fn) xnn_f32_argmaxpool_ukernel_9p8x__neon_c4,
.first_pass_tile_size = 9,
.remainder_pass_tile_size = 8,
};
} else if (!XNN_PLATFORM_MOBILE) {
f32_argmaxpool_config[0] = (struct xnn_argmaxpool_config) {
.up = (xnn_argmaxpool_unipass_ukernel_fn) xnn_f32_argmaxpool_ukernel_4x__scalar_c1,
.first_pass_tile_size = 4,
};
f32_argmaxpool_config[1] = (struct xnn_argmaxpool_config) {
.up = (xnn_argmaxpool_unipass_ukernel_fn) xnn_f32_argmaxpool_ukernel_9x__scalar_c1,
.first_pass_tile_size = 9,
};
f32_argmaxpool_config[2] = (struct xnn_argmaxpool_config) {
.mp = (xnn_argmaxpool_multipass_ukernel_fn) xnn_f32_argmaxpool_ukernel_9p8x__scalar_c1,
.first_pass_tile_size = 9,
.remainder_pass_tile_size = 8,
};
}
#elif XNN_ARCH_ARM64
f32_argmaxpool_config[0] = (struct xnn_argmaxpool_config) {
.up = (xnn_argmaxpool_unipass_ukernel_fn) xnn_f32_argmaxpool_ukernel_4x__neon_c4,
.first_pass_tile_size = 4,
};
f32_argmaxpool_config[1] = (struct xnn_argmaxpool_config) {
.up = (xnn_argmaxpool_unipass_ukernel_fn) xnn_f32_argmaxpool_ukernel_9x__neon_c4,
.first_pass_tile_size = 9,
};
f32_argmaxpool_config[2] = (struct xnn_argmaxpool_config) {
.mp = (xnn_argmaxpool_multipass_ukernel_fn) xnn_f32_argmaxpool_ukernel_9p8x__neon_c4,
.first_pass_tile_size = 9,
.remainder_pass_tile_size = 8,
};
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
f32_argmaxpool_config[0] = (struct xnn_argmaxpool_config) {
.up = (xnn_argmaxpool_unipass_ukernel_fn) xnn_f32_argmaxpool_ukernel_4x__sse2_c4,
.first_pass_tile_size = 4,
};
f32_argmaxpool_config[1] = (struct xnn_argmaxpool_config) {
.up = (xnn_argmaxpool_unipass_ukernel_fn) xnn_f32_argmaxpool_ukernel_9x__sse2_c4,
.first_pass_tile_size = 9,
};
f32_argmaxpool_config[2] = (struct xnn_argmaxpool_config) {
.mp = (xnn_argmaxpool_multipass_ukernel_fn) xnn_f32_argmaxpool_ukernel_9p8x__sse2_c4,
.first_pass_tile_size = 9,
.remainder_pass_tile_size = 8,
};
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
f32_argmaxpool_config[0] = (struct xnn_argmaxpool_config) {
.up = (xnn_argmaxpool_unipass_ukernel_fn) xnn_f32_argmaxpool_ukernel_4x__wasmsimd_c4,
.first_pass_tile_size = 4,
};
f32_argmaxpool_config[1] = (struct xnn_argmaxpool_config) {
.up = (xnn_argmaxpool_unipass_ukernel_fn) xnn_f32_argmaxpool_ukernel_9x__wasmsimd_c4,
.first_pass_tile_size = 9,
};
f32_argmaxpool_config[2] = (struct xnn_argmaxpool_config) {
.mp = (xnn_argmaxpool_multipass_ukernel_fn) xnn_f32_argmaxpool_ukernel_9p8x__wasmsimd_c4,
.first_pass_tile_size = 9,
.remainder_pass_tile_size = 8,
};
#elif XNN_ARCH_WASM
f32_argmaxpool_config[0] = (struct xnn_argmaxpool_config) {
.up = (xnn_argmaxpool_unipass_ukernel_fn) xnn_f32_argmaxpool_ukernel_4x__scalar_c1,
.first_pass_tile_size = 4,
};
f32_argmaxpool_config[1] = (struct xnn_argmaxpool_config) {
.up = (xnn_argmaxpool_unipass_ukernel_fn) xnn_f32_argmaxpool_ukernel_9x__scalar_c1,
.first_pass_tile_size = 9,
};
f32_argmaxpool_config[2] = (struct xnn_argmaxpool_config) {
.mp = (xnn_argmaxpool_multipass_ukernel_fn) xnn_f32_argmaxpool_ukernel_9p8x__scalar_c1,
.first_pass_tile_size = 9,
.remainder_pass_tile_size = 8,
};
#elif XNN_ARCH_RISCV
f32_argmaxpool_config[0] = (struct xnn_argmaxpool_config) {
.up = (xnn_argmaxpool_unipass_ukernel_fn) xnn_f32_argmaxpool_ukernel_4x__scalar_c1,
.first_pass_tile_size = 4,
};
f32_argmaxpool_config[1] = (struct xnn_argmaxpool_config) {
.up = (xnn_argmaxpool_unipass_ukernel_fn) xnn_f32_argmaxpool_ukernel_9x__scalar_c1,
.first_pass_tile_size = 9,
};
f32_argmaxpool_config[2] = (struct xnn_argmaxpool_config) {
.mp = (xnn_argmaxpool_multipass_ukernel_fn) xnn_f32_argmaxpool_ukernel_9p8x__scalar_c1,
.first_pass_tile_size = 9,
.remainder_pass_tile_size = 8,
};
#endif
}
#if XNN_PLATFORM_WINDOWS
static BOOL CALLBACK init_f32_argmaxpool_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f32_argmaxpool_config();
return TRUE;
}
#endif
const struct xnn_argmaxpool_config* xnn_init_f32_argmaxpool_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f32_argmaxpool, &init_f32_argmaxpool_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f32_argmaxpool, &init_f32_argmaxpool_config);
#endif
return f32_argmaxpool_config;
}
| 6,201 | 39.272727 | 114 |
c
|
XNNPACK
|
XNNPACK-master/src/avgpool-config.c
|
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <pthread.h>
#endif
#include <xnnpack/common.h>
#include <xnnpack/config.h>
#include <xnnpack/microparams-init.h>
#include <xnnpack/avgpool.h>
#include <xnnpack/vunary.h>
static struct xnn_avgpool_config f16_avgpool_config = {0};
static struct xnn_avgpool_config f32_avgpool_config = {0};
static struct xnn_avgpool_config qu8_avgpool_config = {0};
#if XNN_PLATFORM_WINDOWS
static INIT_ONCE init_guard_f16_avgpool = INIT_ONCE_STATIC_INIT;
static INIT_ONCE init_guard_f32_avgpool = INIT_ONCE_STATIC_INIT;
static INIT_ONCE init_guard_qu8_avgpool = INIT_ONCE_STATIC_INIT;
#else
static pthread_once_t init_guard_f16_avgpool = PTHREAD_ONCE_INIT;
static pthread_once_t init_guard_f32_avgpool = PTHREAD_ONCE_INIT;
static pthread_once_t init_guard_qu8_avgpool = PTHREAD_ONCE_INIT;
#endif
static void init_f16_avgpool_config(void) {
#if XNN_ARCH_ARM && XNN_ENABLE_ARM_FP16_VECTOR && XNN_ENABLE_ARM_FP16_SCALAR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon_fp16_arith) {
f16_avgpool_config.unipass = (xnn_avgpool_unipass_ukernel_fn) xnn_f16_avgpool_minmax_ukernel_9x__neonfp16arith_c8;
f16_avgpool_config.multipass = (xnn_avgpool_multipass_ukernel_fn) xnn_f16_avgpool_minmax_ukernel_9p8x__neonfp16arith_c8;
f16_avgpool_config.init.f16 = xnn_init_f16_scaleminmax_fp16arith_params;
f16_avgpool_config.primary_tile = 9;
f16_avgpool_config.incremental_tile = 8;
f16_avgpool_config.channel_tile = 8;
}
#elif XNN_ARCH_ARM64 && XNN_ENABLE_ARM_FP16_VECTOR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon_fp16_arith) {
f16_avgpool_config.unipass = (xnn_avgpool_unipass_ukernel_fn) xnn_f16_avgpool_minmax_ukernel_9x__neonfp16arith_c8;
f16_avgpool_config.multipass = (xnn_avgpool_multipass_ukernel_fn) xnn_f16_avgpool_minmax_ukernel_9p8x__neonfp16arith_c8;
f16_avgpool_config.init.f16 = xnn_init_f16_scaleminmax_fp16arith_params;
f16_avgpool_config.primary_tile = 9;
f16_avgpool_config.incremental_tile = 8;
f16_avgpool_config.channel_tile = 8;
}
#elif (XNN_ARCH_X86 || XNN_ARCH_X86_64) && !XNN_PLATFORM_MOBILE
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_x86_avx2) {
f16_avgpool_config.unipass = (xnn_avgpool_unipass_ukernel_fn) xnn_f16_avgpool_minmax_ukernel_9x__f16c_c8;
f16_avgpool_config.multipass = (xnn_avgpool_multipass_ukernel_fn) xnn_f16_avgpool_minmax_ukernel_9p8x__f16c_c8;
f16_avgpool_config.init.f16 = xnn_init_f16_scaleminmax_avx_params;
f16_avgpool_config.primary_tile = 9;
f16_avgpool_config.incremental_tile = 8;
f16_avgpool_config.channel_tile = 8;
}
#endif
}
static void init_f32_avgpool_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
f32_avgpool_config.unipass = (xnn_avgpool_unipass_ukernel_fn) xnn_f32_avgpool_minmax_ukernel_9x__neon_c4;
f32_avgpool_config.multipass = (xnn_avgpool_multipass_ukernel_fn) xnn_f32_avgpool_minmax_ukernel_9p8x__neon_c4;
f32_avgpool_config.init.f32 = xnn_init_f32_scaleminmax_scalar_params;
f32_avgpool_config.primary_tile = 9;
f32_avgpool_config.incremental_tile = 8;
f32_avgpool_config.channel_tile = 4;
} else if (!XNN_PLATFORM_MOBILE) {
f32_avgpool_config.unipass = (xnn_avgpool_unipass_ukernel_fn) xnn_f32_avgpool_minmax_ukernel_9x__scalar_c1;
f32_avgpool_config.multipass = (xnn_avgpool_multipass_ukernel_fn) xnn_f32_avgpool_minmax_ukernel_9p8x__scalar_c1;
f32_avgpool_config.init.f32 = xnn_init_f32_scaleminmax_scalar_params;
f32_avgpool_config.primary_tile = 9;
f32_avgpool_config.incremental_tile = 8;
f32_avgpool_config.channel_tile = 1;
}
#elif XNN_ARCH_ARM64
f32_avgpool_config.unipass = (xnn_avgpool_unipass_ukernel_fn) xnn_f32_avgpool_minmax_ukernel_9x__neon_c4;
f32_avgpool_config.multipass = (xnn_avgpool_multipass_ukernel_fn) xnn_f32_avgpool_minmax_ukernel_9p8x__neon_c4;
f32_avgpool_config.init.f32 = xnn_init_f32_scaleminmax_scalar_params;
f32_avgpool_config.primary_tile = 9;
f32_avgpool_config.incremental_tile = 8;
f32_avgpool_config.channel_tile = 4;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
f32_avgpool_config.unipass = (xnn_avgpool_unipass_ukernel_fn) xnn_f32_avgpool_minmax_ukernel_9x__sse_c4;
f32_avgpool_config.multipass = (xnn_avgpool_multipass_ukernel_fn) xnn_f32_avgpool_minmax_ukernel_9p8x__sse_c4;
f32_avgpool_config.init.f32 = xnn_init_f32_scaleminmax_sse_params;
f32_avgpool_config.primary_tile = 9;
f32_avgpool_config.incremental_tile = 8;
f32_avgpool_config.channel_tile = 4;
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->is_x86) {
f32_avgpool_config.unipass = (xnn_avgpool_unipass_ukernel_fn) xnn_f32_avgpool_minmax_ukernel_9x__wasmsimd_x86_c4;
f32_avgpool_config.multipass = (xnn_avgpool_multipass_ukernel_fn) xnn_f32_avgpool_minmax_ukernel_9p8x__wasmsimd_x86_c4;
f32_avgpool_config.init.f32 = xnn_init_f32_scaleminmax_scalar_params;
f32_avgpool_config.primary_tile = 9;
f32_avgpool_config.incremental_tile = 8;
f32_avgpool_config.channel_tile = 4;
} else {
f32_avgpool_config.unipass = (xnn_avgpool_unipass_ukernel_fn) xnn_f32_avgpool_minmax_ukernel_9x__wasmsimd_arm_c4;
f32_avgpool_config.multipass = (xnn_avgpool_multipass_ukernel_fn) xnn_f32_avgpool_minmax_ukernel_9p8x__wasmsimd_arm_c4;
f32_avgpool_config.init.f32 = xnn_init_f32_scaleminmax_scalar_params;
f32_avgpool_config.primary_tile = 9;
f32_avgpool_config.incremental_tile = 8;
f32_avgpool_config.channel_tile = 4;
}
#elif XNN_ARCH_WASM
f32_avgpool_config.unipass = (xnn_avgpool_unipass_ukernel_fn) xnn_f32_avgpool_minmax_ukernel_9x__wasm_c1;
f32_avgpool_config.multipass = (xnn_avgpool_multipass_ukernel_fn) xnn_f32_avgpool_minmax_ukernel_9p8x__wasm_c1;
f32_avgpool_config.init.f32 = xnn_init_f32_scaleminmax_scalar_params;
f32_avgpool_config.primary_tile = 9;
f32_avgpool_config.incremental_tile = 8;
f32_avgpool_config.channel_tile = 1;
#elif XNN_ARCH_RISCV
f32_avgpool_config.unipass = (xnn_avgpool_unipass_ukernel_fn) xnn_f32_avgpool_minmax_ukernel_9x__scalar_c1;
f32_avgpool_config.multipass = (xnn_avgpool_multipass_ukernel_fn) xnn_f32_avgpool_minmax_ukernel_9p8x__scalar_c1;
f32_avgpool_config.init.f32 = xnn_init_f32_scaleminmax_scalar_params;
f32_avgpool_config.primary_tile = 9;
f32_avgpool_config.incremental_tile = 8;
f32_avgpool_config.channel_tile = 1;
#endif
}
static void init_qu8_avgpool_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
qu8_avgpool_config.unipass = (xnn_avgpool_unipass_ukernel_fn) xnn_qu8_avgpool_minmax_fp32_ukernel_9x__neon_c8;
qu8_avgpool_config.multipass = (xnn_avgpool_multipass_ukernel_fn) xnn_qu8_avgpool_minmax_fp32_ukernel_9p8x__neon_c8;
qu8_avgpool_config.init.qu8 = xnn_init_qu8_avgpool_minmax_fp32_neon_params;
qu8_avgpool_config.primary_tile = 9;
qu8_avgpool_config.incremental_tile = 8;
qu8_avgpool_config.channel_tile = 8;
} else if (!XNN_PLATFORM_MOBILE) {
qu8_avgpool_config.unipass = (xnn_avgpool_unipass_ukernel_fn) xnn_qu8_avgpool_minmax_fp32_ukernel_9x__scalar_imagic_c1;
qu8_avgpool_config.multipass = (xnn_avgpool_multipass_ukernel_fn) xnn_qu8_avgpool_minmax_fp32_ukernel_9p8x__scalar_imagic_c1;
qu8_avgpool_config.init.qu8 = xnn_init_qu8_avgpool_minmax_fp32_scalar_imagic_params;
qu8_avgpool_config.primary_tile = 9;
qu8_avgpool_config.incremental_tile = 8;
qu8_avgpool_config.channel_tile = 1;
}
#elif XNN_ARCH_ARM64
qu8_avgpool_config.unipass = (xnn_avgpool_unipass_ukernel_fn) xnn_qu8_avgpool_minmax_fp32_ukernel_9x__neon_c8;
qu8_avgpool_config.multipass = (xnn_avgpool_multipass_ukernel_fn) xnn_qu8_avgpool_minmax_fp32_ukernel_9p8x__neon_c8;
qu8_avgpool_config.init.qu8 = xnn_init_qu8_avgpool_minmax_fp32_neon_params;
qu8_avgpool_config.primary_tile = 9;
qu8_avgpool_config.incremental_tile = 8;
qu8_avgpool_config.channel_tile = 8;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
qu8_avgpool_config.unipass = (xnn_avgpool_unipass_ukernel_fn) xnn_qu8_avgpool_minmax_fp32_ukernel_9x__sse2_c8;
qu8_avgpool_config.multipass = (xnn_avgpool_multipass_ukernel_fn) xnn_qu8_avgpool_minmax_fp32_ukernel_9p8x__sse2_c8;
qu8_avgpool_config.init.qu8 = xnn_init_qu8_avgpool_minmax_fp32_sse2_params;
qu8_avgpool_config.primary_tile = 9;
qu8_avgpool_config.incremental_tile = 8;
qu8_avgpool_config.channel_tile = 8;
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
qu8_avgpool_config.unipass = (xnn_avgpool_unipass_ukernel_fn) xnn_qu8_avgpool_minmax_fp32_ukernel_9x__scalar_imagic_c1;
qu8_avgpool_config.multipass = (xnn_avgpool_multipass_ukernel_fn) xnn_qu8_avgpool_minmax_fp32_ukernel_9p8x__scalar_imagic_c1;
qu8_avgpool_config.init.qu8 = xnn_init_qu8_avgpool_minmax_fp32_scalar_imagic_params;
qu8_avgpool_config.primary_tile = 9;
qu8_avgpool_config.incremental_tile = 8;
qu8_avgpool_config.channel_tile = 1;
#elif XNN_ARCH_WASM
qu8_avgpool_config.unipass = (xnn_avgpool_unipass_ukernel_fn) xnn_qu8_avgpool_minmax_fp32_ukernel_9x__scalar_imagic_c1;
qu8_avgpool_config.multipass = (xnn_avgpool_multipass_ukernel_fn) xnn_qu8_avgpool_minmax_fp32_ukernel_9p8x__scalar_imagic_c1;
qu8_avgpool_config.init.qu8 = xnn_init_qu8_avgpool_minmax_fp32_scalar_imagic_params;
qu8_avgpool_config.primary_tile = 9;
qu8_avgpool_config.incremental_tile = 8;
qu8_avgpool_config.channel_tile = 1;
#elif XNN_ARCH_RISCV
qu8_avgpool_config.unipass = (xnn_avgpool_unipass_ukernel_fn) xnn_qu8_avgpool_minmax_fp32_ukernel_9x__scalar_imagic_c1;
qu8_avgpool_config.multipass = (xnn_avgpool_multipass_ukernel_fn) xnn_qu8_avgpool_minmax_fp32_ukernel_9p8x__scalar_imagic_c1;
qu8_avgpool_config.init.qu8 = xnn_init_qu8_avgpool_minmax_fp32_scalar_imagic_params;
qu8_avgpool_config.primary_tile = 9;
qu8_avgpool_config.incremental_tile = 8;
qu8_avgpool_config.channel_tile = 1;
#endif
}
#if XNN_PLATFORM_WINDOWS
static BOOL CALLBACK init_f16_avgpool_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f16_avgpool_config();
return TRUE;
}
static BOOL CALLBACK init_f32_avgpool_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f32_avgpool_config();
return TRUE;
}
static BOOL CALLBACK init_qu8_avgpool_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_qu8_avgpool_config();
return TRUE;
}
#endif
const struct xnn_avgpool_config* xnn_init_f16_avgpool_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL || !xnn_is_f16_compatible_config(hardware_config)) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f16_avgpool, &init_f16_avgpool_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f16_avgpool, &init_f16_avgpool_config);
#endif
return &f16_avgpool_config;
}
const struct xnn_avgpool_config* xnn_init_f32_avgpool_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f32_avgpool, &init_f32_avgpool_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f32_avgpool, &init_f32_avgpool_config);
#endif
return &f32_avgpool_config;
}
const struct xnn_avgpool_config* xnn_init_qu8_avgpool_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_qu8_avgpool, &init_qu8_avgpool_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_qu8_avgpool, &init_qu8_avgpool_config);
#endif
return &qu8_avgpool_config;
}
| 12,891 | 49.956522 | 131 |
c
|
XNNPACK
|
XNNPACK-master/src/cache.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h> // For assert.
#include <stddef.h> // For size_t.
#include <stdint.h>  // For uint32_t.
#include <string.h>  // For memcmp and memset.
#include <xnnpack.h>
#include <xnnpack/allocator.h>
#include <xnnpack/cache.h>
#include <xnnpack/log.h>
#include <xnnpack/math.h>
#include <xnnpack/mutex.h>
#define XNN_CACHE_HASH_SEED 7
#define XNN_CACHE_INITIAL_BUCKETS 32
#define XNN_CACHE_MAX_LOAD 0.75
// The maximum load factor is 0.75 (3/4): the table grows once num_entries / num_buckets exceeds 3/4.
#define XNN_CACHE_MAX_LOAD_ENTRIES_MULTIPLIER 4
#define XNN_CACHE_MAX_LOAD_BUCKETS_MULTIPLIER 3
#define XNN_CACHE_GROWTH_FACTOR 2
// MurmurHash3 implementation, copied from smhasher, with minor modifications in
// style and main loop.
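// Final avalanche mixer: spreads the influence of every input bit across all
// bits of the 32-bit hash.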
static inline uint32_t fmix32(uint32_t h)
{
h ^= h >> 16;
h *= UINT32_C(0x85EBCA6B);
h ^= h >> 13;
h *= UINT32_C(0xC2B2AE35);
h ^= h >> 16;
return h;
}
static uint32_t murmur_hash3(const void* key, size_t len, uint32_t seed)
{
const uint8_t* data = (const uint8_t*) key;
uint32_t h1 = seed;
const uint32_t c1 = UINT32_C(0xCC9E2D51);
const uint32_t c2 = UINT32_C(0x1B873593);
const uint32_t* blocks = (const uint32_t*) data;
for (; len >= sizeof(uint32_t); len -= sizeof(uint32_t)) {
uint32_t k1 = *blocks++;
k1 *= c1;
k1 = math_rotl_u32(k1, 15);
k1 *= c2;
h1 ^= k1;
h1 = math_rotl_u32(h1, 13);
h1 = h1 * 5 + UINT32_C(0xE6546B64);
}
const uint8_t* tail = (const uint8_t*) blocks;
uint32_t k1 = 0;
switch (len & 3) {
case 3:
      k1 ^= tail[2] << 16;
      // Fall through.
    case 2:
      k1 ^= tail[1] << 8;
      // Fall through.
case 1:
k1 ^= tail[0];
k1 *= c1;
k1 = math_rotl_u32(k1, 15);
k1 *= c2;
h1 ^= k1;
  }
h1 ^= len;
return fmix32(h1);
}
#ifndef NDEBUG
// This function is only used by an assert, so do not include it in non-debug
// builds.
static inline size_t cache_size(struct xnn_cache* cache) {
switch (cache->type) {
case xnn_cache_type_code:
return cache->code.size;
case xnn_cache_type_weights:
return cache->weights.size;
default:
XNN_UNREACHABLE;
}
return SIZE_MAX;
}
#endif
static inline void* cache_start(struct xnn_cache* cache) {
switch (cache->type) {
case xnn_cache_type_code:
return cache->code.start;
case xnn_cache_type_weights:
return cache->weights.start;
default:
XNN_UNREACHABLE;
}
return NULL;
}
enum xnn_status xnn_init_cache_with_size(struct xnn_cache* cache, size_t num_buckets, enum xnn_cache_type cache_type)
{
memset(cache, 0, sizeof(struct xnn_cache));
cache->buckets = (struct xnn_cache_bucket*) xnn_allocate_zero_memory(num_buckets * sizeof(struct xnn_cache_bucket));
if (cache->buckets == NULL) {
xnn_log_error("fail to allocate memory for cache buckets");
return xnn_status_out_of_memory;
}
cache->type = cache_type;
cache->num_buckets = num_buckets;
return xnn_status_success;
}
enum xnn_status xnn_init_code_cache_with_size(struct xnn_code_cache* cache, size_t num_buckets)
{
memset(cache, 0, sizeof(struct xnn_code_cache));
enum xnn_status status = xnn_status_success;
status = xnn_init_cache_with_size(&cache->cache, num_buckets, xnn_cache_type_code);
if (status != xnn_status_success) {
goto error;
}
status = xnn_allocate_code_memory(&cache->cache.code, XNN_DEFAULT_CODE_BUFFER_SIZE);
if (status != xnn_status_success) {
goto error;
}
return xnn_status_success;
error:
xnn_release_code_cache(cache);
return status;
}
enum xnn_status xnn_init_code_cache(struct xnn_code_cache* cache)
{
return xnn_init_code_cache_with_size(cache, XNN_CACHE_INITIAL_BUCKETS);
}
static bool cache_buckets_grow(struct xnn_cache* cache)
{
const size_t new_num_buckets = cache->num_buckets * XNN_CACHE_GROWTH_FACTOR;
assert(is_po2(new_num_buckets));
struct xnn_cache tmp_cache;
xnn_init_cache_with_size(&tmp_cache, new_num_buckets, cache->type);
for (size_t i = 0; i < cache->num_buckets; i++) {
struct xnn_cache_bucket b = cache->buckets[i];
if (b.size == 0) {
continue;
}
// Find the first empty slot by linear probing to insert. No need to check
// hashes since we are not looking up anything, just moving things around
// into a bigger hash table.
const size_t mask = tmp_cache.num_buckets - 1;
size_t idx = b.hash & mask;
while (tmp_cache.buckets[idx].size != 0) {
idx = (idx + 1) & mask;
}
tmp_cache.buckets[idx].hash = b.hash;
tmp_cache.buckets[idx].size = b.size;
tmp_cache.buckets[idx].offset = b.offset;
}
xnn_release_memory(cache->buckets);
cache->buckets = tmp_cache.buckets;
cache->num_buckets = tmp_cache.num_buckets;
return true;
}
static inline bool bytes_equal(struct xnn_cache* cache, void* ptr, size_t size, size_t offset)
{
return memcmp(ptr, (void*) ((uintptr_t) cache_start(cache) + offset), size) == 0;
}
static bool lookup(struct xnn_cache* cache, void* ptr, size_t size, uint32_t hash, size_t* index)
{
assert(is_po2(cache->num_buckets));
const size_t mask = cache->num_buckets - 1;
size_t idx = hash & mask;
const struct xnn_cache_bucket* buckets = cache->buckets;
// Linear probing.
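  // Probing stops at the first empty bucket (a miss) or at an entry whose
  // hash, size, and bytes all match (a hit).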
while (buckets[idx].size != 0 &&
!(buckets[idx].hash == hash &&
size == buckets[idx].size &&
bytes_equal(cache, ptr, buckets[idx].size, buckets[idx].offset))) {
idx = (idx + 1) & mask;
}
*index = idx;
if (buckets[idx].size == 0) {
return false;
} else {
return true;
}
}
static bool insert(struct xnn_cache* cache, void* ptr, size_t size)
{
const uint32_t hash = murmur_hash3(ptr, size, /*seed=*/XNN_CACHE_HASH_SEED);
size_t idx;
const bool found = lookup(cache, ptr, size, hash, &idx);
if (found) {
return false;
}
// Ensure we have enough buckets to keep under our load limit.
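  // For example, with the initial 32 buckets the table grows once 25 entries
  // are present: 24 * 4 = 96 is not greater than 32 * 3 = 96, but 25 * 4 = 100 is.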
if (cache->num_entries * XNN_CACHE_MAX_LOAD_ENTRIES_MULTIPLIER >
cache->num_buckets * XNN_CACHE_MAX_LOAD_BUCKETS_MULTIPLIER) {
if (!cache_buckets_grow(cache)) {
// Can't grow hash table anymore.
xnn_log_error("failed to grow cache buckets");
return false;
}
xnn_log_debug("successfully grew cache buckets");
// If the cache grew, idx is stale, since that is based on the old cache's num_buckets.
const bool found_in_grown_cache = lookup(cache, ptr, size, hash, &idx);
assert(!found_in_grown_cache);
(void) found_in_grown_cache; // Silence unused variable warnings.
}
// Check that ptr points into cache's buffer.
assert((uintptr_t) ptr >= (uintptr_t) cache_start(cache));
if (cache->type == xnn_cache_type_code) {
assert((uintptr_t) ptr < (uintptr_t) cache_start(cache) + cache_size(cache));
}
const size_t offset = (uintptr_t) ptr - (uintptr_t) cache_start(cache);
// Insert the entry.
cache->buckets[idx].size = size;
cache->buckets[idx].hash = hash;
cache->buckets[idx].offset = offset;
cache->num_entries++;
return true;
}
// Checks if a generated microkernel is already in the cache, returns the offset
// if found, XNN_CACHE_NOT_FOUND otherwise.
static size_t lookup_cache(struct xnn_cache* cache, void* ptr, size_t size)
{
const uint32_t hash = murmur_hash3(ptr, size, /*seed=*/XNN_CACHE_HASH_SEED);
size_t bucket_idx;
if (lookup(cache, ptr, size, hash, &bucket_idx)) {
cache->hits++;
return cache->buckets[bucket_idx].offset;
} else {
cache->misses++;
return XNN_CACHE_NOT_FOUND;
}
}
size_t xnn_get_or_insert_cache(struct xnn_cache* cache, void* ptr, size_t size)
{
const size_t found_offset = lookup_cache(cache, ptr, size);
if (found_offset != XNN_CACHE_NOT_FOUND) {
if (cache->type == xnn_cache_type_code) {
      // Found in the cache: rewind the buffer, because code generators update the buffer size.
cache->code.size -= size;
}
return found_offset;
}
if (cache->type == xnn_cache_type_weights) {
    // Cache miss: weights packing functions don't update the buffer size, so update it here.
cache->weights.size += size;
}
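  // The cache deals in offsets relative to cache_start() rather than raw
  // pointers, so entries remain valid even if the backing buffer is
  // reallocated as it grows.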
const size_t offset = (uintptr_t) ptr - (uintptr_t) cache_start(cache);
if (!insert(cache, ptr, size)) {
return XNN_CACHE_NOT_FOUND;
}
return offset;
}
size_t xnn_get_or_insert_code_cache(struct xnn_code_cache* cache, void* ptr, size_t size)
{
return xnn_get_or_insert_cache(&cache->cache, ptr, size);
}
enum xnn_status xnn_release_code_cache(struct xnn_code_cache* cache)
{
if XNN_LIKELY(cache != NULL) {
assert(cache->cache.type == xnn_cache_type_code);
xnn_release_code_memory(&cache->cache.code);
xnn_release_memory(cache->cache.buckets);
}
return xnn_status_success;
}
enum xnn_status xnn_internal_init_weights_cache(
struct xnn_weights_cache* cache,
size_t num_buckets,
size_t buffer_size)
{
memset(cache, 0, sizeof(struct xnn_weights_cache));
enum xnn_status status = xnn_status_success;
status = xnn_init_cache_with_size(&cache->cache, num_buckets, xnn_cache_type_weights);
if (status != xnn_status_success) {
goto error;
}
status = xnn_allocate_weights_memory(&cache->cache.weights, buffer_size);
if (status != xnn_status_success) {
goto error;
}
status = xnn_mutex_init(&cache->mutex);
if (status != xnn_status_success) {
goto error;
}
return xnn_status_success;
error:
xnn_release_weights_cache(cache);
return status;
}
enum xnn_status xnn_init_weights_cache_with_size(struct xnn_weights_cache* cache, size_t size)
{
return xnn_internal_init_weights_cache(cache, XNN_CACHE_INITIAL_BUCKETS, size);
}
enum xnn_status xnn_init_weights_cache(struct xnn_weights_cache* cache)
{
return xnn_init_weights_cache_with_size(cache, XNN_DEFAULT_WEIGHTS_BUFFER_SIZE);
}
enum xnn_status xnn_finalize_weights_cache(
struct xnn_weights_cache* cache,
enum xnn_weights_cache_finalization_kind finalization_kind)
{
switch (cache->finalization_state) {
case xnn_cache_state_hard_finalized:
case xnn_cache_state_soft_finalized:
xnn_log_error("failed to finalize an already final weights cache");
return xnn_status_invalid_state;
case xnn_cache_state_not_finalized: {
enum xnn_status status;
enum xnn_cache_state finalized_state;
if (finalization_kind == xnn_weights_cache_finalization_kind_hard) {
xnn_log_debug("hard finalizing weights cache");
status = xnn_finalize_weights_memory(&cache->cache.weights);
// Also release the memory used by hash table (but not the weights memory).
xnn_release_memory(cache->cache.buckets);
cache->cache.buckets = NULL;
finalized_state = xnn_cache_state_hard_finalized;
} else {
xnn_log_debug("soft finalizing weights cache");
assert(finalization_kind == xnn_weights_cache_finalization_kind_soft);
// Finalize weights cache by reserving sufficient space for the insertion of the largest cached weights. This
// ensures that we have space to write packed weights to check for cache hits without growing and moving the
// memory. This has some memory overhead, which can be as large as the size of the largest cached weights,
// rounded up to page size.
status = xnn_reserve_weights_memory(&cache->cache.weights, cache->max_weights_size);
finalized_state = xnn_cache_state_soft_finalized;
}
if (status != xnn_status_success) {
xnn_log_error("failed to finalize weights cache memory");
return xnn_status_invalid_state;
}
cache->finalization_state = finalized_state;
return xnn_status_success;
}
default:
XNN_UNREACHABLE;
}
}
enum xnn_status xnn_release_weights_cache(struct xnn_weights_cache* cache)
{
if XNN_LIKELY(cache != NULL) {
assert(cache->cache.type == xnn_cache_type_weights);
xnn_release_weights_memory(&cache->cache.weights);
if (cache->cache.buckets != NULL) {
xnn_release_memory(cache->cache.buckets);
}
const enum xnn_status status = xnn_mutex_destroy(&cache->mutex);
if (status != xnn_status_success) {
return status;
}
}
return xnn_status_success;
}
static inline bool cache_has_space(struct xnn_weights_cache* cache, size_t n)
{
const struct xnn_weights_buffer buf = cache->cache.weights;
return buf.size + n <= buf.capacity;
}
void* xnn_reserve_space_in_weights_cache(struct xnn_weights_cache* cache, size_t n) {
switch (cache->finalization_state) {
case xnn_cache_state_hard_finalized:
xnn_log_error("cannot reserve additional space in a finalized compact weights cache");
return NULL;
case xnn_cache_state_soft_finalized:
if (!cache_has_space(cache, n)) {
xnn_log_error("cannot reserve additional space in a finalized weights cache");
return NULL;
}
// If the cache is finalized, and has space for `n` bytes, we still want to lock the mutex, because we can have
// multiple writers attempting to write to this space.
break;
default:
break;
}
enum xnn_status status = xnn_mutex_lock(&cache->mutex);
if (status != xnn_status_success) {
return NULL;
}
struct xnn_weights_buffer* buffer = &cache->cache.weights;
status = xnn_reserve_weights_memory(buffer, n);
if (status != xnn_status_success) {
xnn_mutex_unlock(&cache->mutex);
return NULL;
}
return (void*) ((uintptr_t) buffer->start + buffer->size);
}
size_t xnn_get_or_insert_weights_cache(struct xnn_weights_cache* cache, void* ptr, size_t size)
{
size_t offset = XNN_CACHE_NOT_FOUND;
switch (cache->finalization_state) {
case xnn_cache_state_hard_finalized: {
xnn_log_error("cannot insert into a finalized compact weights cache");
return XNN_CACHE_NOT_FOUND;
}
case xnn_cache_state_soft_finalized: {
// Inserting into a finalized weights cache is okay as long as:
// 1. there is sufficient space in the memory (to write the incoming packed weights), or
      // 2. the incoming packed weights are already in the cache
if (!cache_has_space(cache, size)) {
xnn_log_error("insufficient extra space in finalized weights cache buffer");
return XNN_CACHE_NOT_FOUND;
}
      // We need to release the mutex from this point onwards, because xnn_reserve_space_in_weights_cache would have
      // returned non-NULL (which means that it locked the mutex).
const size_t found_offset = lookup_cache(&cache->cache, ptr, size);
if (found_offset == XNN_CACHE_NOT_FOUND) {
xnn_log_error("packed weights not found in finalized weights cache");
}
offset = found_offset;
break;
}
case xnn_cache_state_not_finalized: {
offset = xnn_get_or_insert_cache(&cache->cache, ptr, size);
if (offset != XNN_CACHE_NOT_FOUND) {
        // Found or inserted packed weights; update the largest size seen so far. This is used when finalizing the
        // weights cache to ensure there is extra space at the end for future cache checks.
cache->max_weights_size = max(size, cache->max_weights_size);
}
break;
}
}
// Mutex is locked in xnn_reserve_space_in_weights_cache when it returns non-NULL, i.e. when cache is not finalized,
// or if it is xnn_cache_state_soft_finalized and has sufficient space.
const enum xnn_status status = xnn_mutex_unlock(&cache->mutex);
(void) status;
assert(status == xnn_status_success);
return offset;
}
bool xnn_weights_cache_is_finalized(struct xnn_weights_cache* cache) {
return cache->finalization_state != xnn_cache_state_not_finalized;
}
| 15,602 | 30.713415 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/cmul-config.c
|
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <pthread.h>
#endif
#include <xnnpack/common.h>
#include <xnnpack/config.h>
#include <xnnpack/vbinary.h>
static struct xnn_cmul_config f32_cmul_config = {0};
#if XNN_PLATFORM_WINDOWS
static INIT_ONCE init_guard_f32_cmul = INIT_ONCE_STATIC_INIT;
#else
static pthread_once_t init_guard_f32_cmul = PTHREAD_ONCE_INIT;
#endif
static void init_f32_cmul_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
f32_cmul_config.ukernel = (xnn_vbinary_ukernel_fn) xnn_f32_vcmul_ukernel__neon_x8;
f32_cmul_config.element_tile = 8;
} else if (!XNN_PLATFORM_MOBILE) {
f32_cmul_config.ukernel = (xnn_vbinary_ukernel_fn) xnn_f32_vcmul_ukernel__scalar_x4;
f32_cmul_config.element_tile = 4;
}
#elif XNN_ARCH_ARM64
f32_cmul_config.ukernel = (xnn_vbinary_ukernel_fn) xnn_f32_vcmul_ukernel__neon_x8;
f32_cmul_config.element_tile = 8;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
f32_cmul_config.ukernel = (xnn_vbinary_ukernel_fn) xnn_f32_vcmul_ukernel__sse_x8;
f32_cmul_config.element_tile = 8;
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
f32_cmul_config.ukernel = (xnn_vbinary_ukernel_fn) xnn_f32_vcmul_ukernel__wasmsimd_x8;
f32_cmul_config.element_tile = 8;
#elif XNN_ARCH_WASM
f32_cmul_config.ukernel = (xnn_vbinary_ukernel_fn) xnn_f32_vcmul_ukernel__scalar_x4;
f32_cmul_config.element_tile = 4;
#elif XNN_ARCH_RISCV
f32_cmul_config.ukernel = (xnn_vbinary_ukernel_fn) xnn_f32_vcmul_ukernel__scalar_x4;
f32_cmul_config.element_tile = 4;
#endif
}
#if XNN_PLATFORM_WINDOWS
static BOOL CALLBACK init_f32_cmul_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f32_cmul_config();
return TRUE;
}
#endif
const struct xnn_cmul_config* xnn_init_f32_cmul_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f32_cmul, &init_f32_cmul_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f32_cmul, &init_f32_cmul_config);
#endif
return &f32_cmul_config;
}
| 2,518 | 30.886076 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/conv-hwc2chw-config.c
|
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <pthread.h>
#endif
#include <xnnpack/common.h>
#include <xnnpack/config.h>
#include <xnnpack/microparams-init.h>
#include <xnnpack/conv.h>
static struct xnn_conv_hwc2chw_config f16_conv_hwc2chw_3x3c3s2_config = {0};
static struct xnn_conv_hwc2chw_config f32_conv_hwc2chw_3x3c3s2_config = {0};
#if XNN_PLATFORM_WINDOWS
static INIT_ONCE init_guard_f16_conv_hwc2chw_3x3c3s2 = INIT_ONCE_STATIC_INIT;
static INIT_ONCE init_guard_f32_conv_hwc2chw_3x3c3s2 = INIT_ONCE_STATIC_INIT;
#else
static pthread_once_t init_guard_f16_conv_hwc2chw_3x3c3s2 = PTHREAD_ONCE_INIT;
static pthread_once_t init_guard_f32_conv_hwc2chw_3x3c3s2 = PTHREAD_ONCE_INIT;
#endif
static void init_f16_conv_hwc2chw_3x3c3s2_config(void) {
#if XNN_ARCH_ARM && XNN_ENABLE_ARM_FP16_VECTOR && XNN_ENABLE_ARM_FP16_SCALAR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon_fp16_arith) {
f16_conv_hwc2chw_3x3c3s2_config.ukernel_with_symm_padding =
(xnn_conv_hwc2chw_ukernel_fn) xnn_f16_conv_hwc2chw_ukernel_3x3s2p1c3x4__neonfp16arith_2x2;
f16_conv_hwc2chw_3x3c3s2_config.init.f16 = xnn_init_f16_minmax_fp16arith_params;
f16_conv_hwc2chw_3x3c3s2_config.output_channel_tile = 4;
f16_conv_hwc2chw_3x3c3s2_config.output_height_tile = 2;
f16_conv_hwc2chw_3x3c3s2_config.output_width_tile = 2;
}
#elif XNN_ARCH_ARM64 && XNN_ENABLE_ARM_FP16_VECTOR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon_fp16_arith) {
f16_conv_hwc2chw_3x3c3s2_config.ukernel_with_symm_padding =
(xnn_conv_hwc2chw_ukernel_fn) xnn_f16_conv_hwc2chw_ukernel_3x3s2p1c3x4__neonfp16arith_2x2;
f16_conv_hwc2chw_3x3c3s2_config.init.f16 = xnn_init_f16_minmax_fp16arith_params;
f16_conv_hwc2chw_3x3c3s2_config.output_channel_tile = 4;
f16_conv_hwc2chw_3x3c3s2_config.output_height_tile = 2;
f16_conv_hwc2chw_3x3c3s2_config.output_width_tile = 2;
}
#endif
}
static void init_f32_conv_hwc2chw_3x3c3s2_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
f32_conv_hwc2chw_3x3c3s2_config.ukernel_with_symm_padding =
(xnn_conv_hwc2chw_ukernel_fn) xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__neon_2x2;
f32_conv_hwc2chw_3x3c3s2_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_conv_hwc2chw_3x3c3s2_config.output_channel_tile = 4;
f32_conv_hwc2chw_3x3c3s2_config.output_height_tile = 2;
f32_conv_hwc2chw_3x3c3s2_config.output_width_tile = 2;
} else if (!XNN_PLATFORM_MOBILE) {
f32_conv_hwc2chw_3x3c3s2_config.ukernel_with_symm_padding =
(xnn_conv_hwc2chw_ukernel_fn) xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__scalar_1x1;
f32_conv_hwc2chw_3x3c3s2_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_conv_hwc2chw_3x3c3s2_config.output_channel_tile = 4;
f32_conv_hwc2chw_3x3c3s2_config.output_height_tile = 1;
f32_conv_hwc2chw_3x3c3s2_config.output_width_tile = 1;
}
#elif XNN_ARCH_ARM64
f32_conv_hwc2chw_3x3c3s2_config.ukernel_with_symm_padding =
(xnn_conv_hwc2chw_ukernel_fn) xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__aarch64_neonfma_2x2;
f32_conv_hwc2chw_3x3c3s2_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_conv_hwc2chw_3x3c3s2_config.output_channel_tile = 4;
f32_conv_hwc2chw_3x3c3s2_config.output_height_tile = 2;
f32_conv_hwc2chw_3x3c3s2_config.output_width_tile = 2;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
f32_conv_hwc2chw_3x3c3s2_config.ukernel_with_symm_padding =
(xnn_conv_hwc2chw_ukernel_fn) xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__sse_2x2;
f32_conv_hwc2chw_3x3c3s2_config.init.f32 = xnn_init_f32_minmax_sse_params;
f32_conv_hwc2chw_3x3c3s2_config.output_channel_tile = 4;
f32_conv_hwc2chw_3x3c3s2_config.output_height_tile = 2;
f32_conv_hwc2chw_3x3c3s2_config.output_width_tile = 2;
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
f32_conv_hwc2chw_3x3c3s2_config.ukernel_with_symm_padding =
(xnn_conv_hwc2chw_ukernel_fn) xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__wasmsimd_2x2;
f32_conv_hwc2chw_3x3c3s2_config.init.f32 = xnn_init_f32_minmax_wasmsimd_params;
f32_conv_hwc2chw_3x3c3s2_config.output_channel_tile = 4;
f32_conv_hwc2chw_3x3c3s2_config.output_height_tile = 2;
f32_conv_hwc2chw_3x3c3s2_config.output_width_tile = 2;
#elif XNN_ARCH_WASM
f32_conv_hwc2chw_3x3c3s2_config.ukernel_with_symm_padding =
(xnn_conv_hwc2chw_ukernel_fn) xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__scalar_1x1;
f32_conv_hwc2chw_3x3c3s2_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_conv_hwc2chw_3x3c3s2_config.output_channel_tile = 4;
f32_conv_hwc2chw_3x3c3s2_config.output_height_tile = 1;
f32_conv_hwc2chw_3x3c3s2_config.output_width_tile = 1;
#elif XNN_ARCH_RISCV
f32_conv_hwc2chw_3x3c3s2_config.ukernel_with_symm_padding =
(xnn_conv_hwc2chw_ukernel_fn) xnn_f32_conv_hwc2chw_ukernel_3x3s2p1c3x4__scalar_1x1;
f32_conv_hwc2chw_3x3c3s2_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_conv_hwc2chw_3x3c3s2_config.output_channel_tile = 4;
f32_conv_hwc2chw_3x3c3s2_config.output_height_tile = 1;
f32_conv_hwc2chw_3x3c3s2_config.output_width_tile = 1;
#endif
}
#if XNN_PLATFORM_WINDOWS
static BOOL CALLBACK init_f16_conv_hwc2chw_3x3c3s2_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f16_conv_hwc2chw_3x3c3s2_config();
return TRUE;
}
static BOOL CALLBACK init_f32_conv_hwc2chw_3x3c3s2_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f32_conv_hwc2chw_3x3c3s2_config();
return TRUE;
}
#endif
const struct xnn_conv_hwc2chw_config* xnn_init_f16_conv_hwc2chw_3x3c3s2_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL || !xnn_is_f16_chw_compatible_config(hardware_config)) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f16_conv_hwc2chw_3x3c3s2, &init_f16_conv_hwc2chw_3x3c3s2_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f16_conv_hwc2chw_3x3c3s2, &init_f16_conv_hwc2chw_3x3c3s2_config);
#endif
return &f16_conv_hwc2chw_3x3c3s2_config;
}
const struct xnn_conv_hwc2chw_config* xnn_init_f32_conv_hwc2chw_3x3c3s2_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL || !xnn_is_chw_compatible_config(hardware_config)) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f32_conv_hwc2chw_3x3c3s2, &init_f32_conv_hwc2chw_3x3c3s2_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f32_conv_hwc2chw_3x3c3s2, &init_f32_conv_hwc2chw_3x3c3s2_config);
#endif
return &f32_conv_hwc2chw_3x3c3s2_config;
}
| 7,320 | 47.164474 | 124 |
c
|
XNNPACK
|
XNNPACK-master/src/gavgpool-config.c
|
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <pthread.h>
#endif
#include <xnnpack/common.h>
#include <xnnpack/config.h>
#include <xnnpack/microparams-init.h>
#include <xnnpack/gavgpool.h>
static struct xnn_gavgpool_config f16_gavgpool_config = {0};
static struct xnn_gavgpool_config f32_gavgpool_config = {0};
static struct xnn_gavgpool_config qs8_gavgpool_config = {0};
static struct xnn_gavgpool_config qu8_gavgpool_config = {0};
#if XNN_PLATFORM_WINDOWS
static INIT_ONCE init_guard_f16_gavgpool = INIT_ONCE_STATIC_INIT;
static INIT_ONCE init_guard_f32_gavgpool = INIT_ONCE_STATIC_INIT;
static INIT_ONCE init_guard_qs8_gavgpool = INIT_ONCE_STATIC_INIT;
static INIT_ONCE init_guard_qu8_gavgpool = INIT_ONCE_STATIC_INIT;
#else
static pthread_once_t init_guard_f16_gavgpool = PTHREAD_ONCE_INIT;
static pthread_once_t init_guard_f32_gavgpool = PTHREAD_ONCE_INIT;
static pthread_once_t init_guard_qs8_gavgpool = PTHREAD_ONCE_INIT;
static pthread_once_t init_guard_qu8_gavgpool = PTHREAD_ONCE_INIT;
#endif
static void init_f16_gavgpool_config(void) {
#if XNN_ARCH_ARM && XNN_ENABLE_ARM_FP16_VECTOR && XNN_ENABLE_ARM_FP16_SCALAR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon_fp16_arith) {
f16_gavgpool_config.unipass = (xnn_gavgpool_unipass_ukernel_fn) xnn_f16_gavgpool_minmax_ukernel_7x__neonfp16arith_c8;
f16_gavgpool_config.multipass = (xnn_gavgpool_multipass_ukernel_fn) xnn_f16_gavgpool_minmax_ukernel_7p7x__neonfp16arith_c8;
f16_gavgpool_config.init.f16 = xnn_init_f16_scaleminmax_fp16arith_params;
f16_gavgpool_config.update.f16 = xnn_update_f16_scaleminmax_fp16arith_params;
f16_gavgpool_config.row_tile = 7;
f16_gavgpool_config.channel_tile = 8;
}
#elif XNN_ARCH_ARM64 && XNN_ENABLE_ARM_FP16_VECTOR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon_fp16_arith) {
f16_gavgpool_config.unipass = (xnn_gavgpool_unipass_ukernel_fn) xnn_f16_gavgpool_minmax_ukernel_7x__neonfp16arith_c8;
f16_gavgpool_config.multipass = (xnn_gavgpool_multipass_ukernel_fn) xnn_f16_gavgpool_minmax_ukernel_7p7x__neonfp16arith_c8;
f16_gavgpool_config.init.f16 = xnn_init_f16_scaleminmax_fp16arith_params;
f16_gavgpool_config.update.f16 = xnn_update_f16_scaleminmax_fp16arith_params;
f16_gavgpool_config.row_tile = 7;
f16_gavgpool_config.channel_tile = 8;
}
#elif (XNN_ARCH_X86 || XNN_ARCH_X86_64) && !XNN_PLATFORM_MOBILE
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_x86_avx2) {
f16_gavgpool_config.unipass = (xnn_gavgpool_unipass_ukernel_fn) xnn_f16_gavgpool_minmax_ukernel_7x__f16c_c8;
f16_gavgpool_config.multipass = (xnn_gavgpool_multipass_ukernel_fn) xnn_f16_gavgpool_minmax_ukernel_7p7x__f16c_c8;
f16_gavgpool_config.init.f16 = xnn_init_f16_scaleminmax_avx_params;
f16_gavgpool_config.update.f16 = xnn_update_f16_scaleminmax_avx_params;
f16_gavgpool_config.row_tile = 7;
f16_gavgpool_config.channel_tile = 8;
}
#endif
}
static void init_f32_gavgpool_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
f32_gavgpool_config.unipass = (xnn_gavgpool_unipass_ukernel_fn) xnn_f32_gavgpool_minmax_ukernel_7x__neon_c4;
f32_gavgpool_config.multipass = (xnn_gavgpool_multipass_ukernel_fn) xnn_f32_gavgpool_minmax_ukernel_7p7x__neon_c4;
f32_gavgpool_config.init.f32 = xnn_init_f32_scaleminmax_scalar_params;
f32_gavgpool_config.update.f32 = xnn_update_f32_scaleminmax_scalar_params;
f32_gavgpool_config.row_tile = 7;
f32_gavgpool_config.channel_tile = 4;
} else if (!XNN_PLATFORM_MOBILE) {
f32_gavgpool_config.unipass = (xnn_gavgpool_unipass_ukernel_fn) xnn_f32_gavgpool_minmax_ukernel_7x__scalar_c1;
f32_gavgpool_config.multipass = (xnn_gavgpool_multipass_ukernel_fn) xnn_f32_gavgpool_minmax_ukernel_7p7x__scalar_c1;
f32_gavgpool_config.init.f32 = xnn_init_f32_scaleminmax_scalar_params;
f32_gavgpool_config.update.f32 = xnn_update_f32_scaleminmax_scalar_params;
f32_gavgpool_config.row_tile = 7;
f32_gavgpool_config.channel_tile = 1;
}
#elif XNN_ARCH_ARM64
f32_gavgpool_config.unipass = (xnn_gavgpool_unipass_ukernel_fn) xnn_f32_gavgpool_minmax_ukernel_7x__neon_c4;
f32_gavgpool_config.multipass = (xnn_gavgpool_multipass_ukernel_fn) xnn_f32_gavgpool_minmax_ukernel_7p7x__neon_c4;
f32_gavgpool_config.init.f32 = xnn_init_f32_scaleminmax_scalar_params;
f32_gavgpool_config.update.f32 = xnn_update_f32_scaleminmax_scalar_params;
f32_gavgpool_config.row_tile = 7;
f32_gavgpool_config.channel_tile = 4;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
f32_gavgpool_config.unipass = (xnn_gavgpool_unipass_ukernel_fn) xnn_f32_gavgpool_minmax_ukernel_7x__sse_c4;
f32_gavgpool_config.multipass = (xnn_gavgpool_multipass_ukernel_fn) xnn_f32_gavgpool_minmax_ukernel_7p7x__sse_c4;
f32_gavgpool_config.init.f32 = xnn_init_f32_scaleminmax_sse_params;
f32_gavgpool_config.update.f32 = xnn_update_f32_scaleminmax_sse_params;
f32_gavgpool_config.row_tile = 7;
f32_gavgpool_config.channel_tile = 4;
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->is_x86) {
f32_gavgpool_config.unipass = (xnn_gavgpool_unipass_ukernel_fn) xnn_f32_gavgpool_minmax_ukernel_7x__wasmsimd_x86_c4;
f32_gavgpool_config.multipass = (xnn_gavgpool_multipass_ukernel_fn) xnn_f32_gavgpool_minmax_ukernel_7p7x__wasmsimd_x86_c4;
f32_gavgpool_config.init.f32 = xnn_init_f32_scaleminmax_scalar_params;
f32_gavgpool_config.update.f32 = xnn_update_f32_scaleminmax_scalar_params;
f32_gavgpool_config.row_tile = 7;
f32_gavgpool_config.channel_tile = 4;
} else {
f32_gavgpool_config.unipass = (xnn_gavgpool_unipass_ukernel_fn) xnn_f32_gavgpool_minmax_ukernel_7x__wasmsimd_arm_c4;
f32_gavgpool_config.multipass = (xnn_gavgpool_multipass_ukernel_fn) xnn_f32_gavgpool_minmax_ukernel_7p7x__wasmsimd_arm_c4;
f32_gavgpool_config.init.f32 = xnn_init_f32_scaleminmax_scalar_params;
f32_gavgpool_config.update.f32 = xnn_update_f32_scaleminmax_scalar_params;
f32_gavgpool_config.row_tile = 7;
f32_gavgpool_config.channel_tile = 4;
}
#elif XNN_ARCH_WASM
f32_gavgpool_config.unipass = (xnn_gavgpool_unipass_ukernel_fn) xnn_f32_gavgpool_minmax_ukernel_7x__wasm_c1;
f32_gavgpool_config.multipass = (xnn_gavgpool_multipass_ukernel_fn) xnn_f32_gavgpool_minmax_ukernel_7p7x__wasm_c1;
f32_gavgpool_config.init.f32 = xnn_init_f32_scaleminmax_scalar_params;
f32_gavgpool_config.update.f32 = xnn_update_f32_scaleminmax_scalar_params;
f32_gavgpool_config.row_tile = 7;
f32_gavgpool_config.channel_tile = 1;
#elif XNN_ARCH_RISCV
f32_gavgpool_config.unipass = (xnn_gavgpool_unipass_ukernel_fn) xnn_f32_gavgpool_minmax_ukernel_7x__scalar_c1;
f32_gavgpool_config.multipass = (xnn_gavgpool_multipass_ukernel_fn) xnn_f32_gavgpool_minmax_ukernel_7p7x__scalar_c1;
f32_gavgpool_config.init.f32 = xnn_init_f32_scaleminmax_scalar_params;
f32_gavgpool_config.update.f32 = xnn_update_f32_scaleminmax_scalar_params;
f32_gavgpool_config.row_tile = 7;
f32_gavgpool_config.channel_tile = 1;
#endif
}
static void init_qs8_gavgpool_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
qs8_gavgpool_config.unipass = (xnn_gavgpool_unipass_ukernel_fn) xnn_qs8_gavgpool_minmax_rndnu_ukernel_7x__neon_c8;
qs8_gavgpool_config.multipass = (xnn_gavgpool_multipass_ukernel_fn) xnn_qs8_gavgpool_minmax_rndnu_ukernel_7p7x__neon_c8;
qs8_gavgpool_config.init.qs8 = xnn_init_qs8_avgpool_minmax_rndnu_neon_params;
qs8_gavgpool_config.update.qs8 = xnn_update_qs8_avgpool_minmax_rndnu_neon_params;
qs8_gavgpool_config.row_tile = 7;
qs8_gavgpool_config.channel_tile = 8;
} else if (!XNN_PLATFORM_MOBILE) {
qs8_gavgpool_config.unipass = (xnn_gavgpool_unipass_ukernel_fn) xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c1;
qs8_gavgpool_config.multipass = (xnn_gavgpool_multipass_ukernel_fn) xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c1;
qs8_gavgpool_config.init.qs8 = xnn_init_qs8_avgpool_minmax_fp32_scalar_imagic_params;
qs8_gavgpool_config.update.qs8 = xnn_update_qs8_avgpool_minmax_fp32_scalar_imagic_params;
qs8_gavgpool_config.row_tile = 7;
qs8_gavgpool_config.channel_tile = 1;
}
#elif XNN_ARCH_ARM64
qs8_gavgpool_config.unipass = (xnn_gavgpool_unipass_ukernel_fn) xnn_qs8_gavgpool_minmax_rndnu_ukernel_7x__neon_c8;
qs8_gavgpool_config.multipass = (xnn_gavgpool_multipass_ukernel_fn) xnn_qs8_gavgpool_minmax_rndnu_ukernel_7p7x__neon_c8;
qs8_gavgpool_config.init.qs8 = xnn_init_qs8_avgpool_minmax_rndnu_neon_params;
qs8_gavgpool_config.update.qs8 = xnn_update_qs8_avgpool_minmax_rndnu_neon_params;
qs8_gavgpool_config.row_tile = 7;
qs8_gavgpool_config.channel_tile = 8;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_x86_sse4_1) {
qs8_gavgpool_config.unipass = (xnn_gavgpool_unipass_ukernel_fn) xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__sse41_c8;
qs8_gavgpool_config.multipass = (xnn_gavgpool_multipass_ukernel_fn) xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__sse41_c8;
qs8_gavgpool_config.init.qs8 = xnn_init_qs8_avgpool_minmax_fp32_sse4_params;
qs8_gavgpool_config.update.qs8 = xnn_update_qs8_avgpool_minmax_fp32_sse4_params;
qs8_gavgpool_config.row_tile = 7;
qs8_gavgpool_config.channel_tile = 8;
} else {
qs8_gavgpool_config.unipass = (xnn_gavgpool_unipass_ukernel_fn) xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__sse2_c8;
qs8_gavgpool_config.multipass = (xnn_gavgpool_multipass_ukernel_fn) xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__sse2_c8;
qs8_gavgpool_config.init.qs8 = xnn_init_qs8_avgpool_minmax_fp32_sse2_params;
qs8_gavgpool_config.update.qs8 = xnn_update_qs8_avgpool_minmax_fp32_sse2_params;
qs8_gavgpool_config.row_tile = 7;
qs8_gavgpool_config.channel_tile = 8;
}
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
qs8_gavgpool_config.unipass = (xnn_gavgpool_unipass_ukernel_fn) xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__wasmsimd_c16;
qs8_gavgpool_config.multipass = (xnn_gavgpool_multipass_ukernel_fn) xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__wasmsimd_c16;
qs8_gavgpool_config.init.qs8 = xnn_init_qs8_avgpool_minmax_fp32_wasmsimd_params;
qs8_gavgpool_config.update.qs8 = xnn_update_qs8_avgpool_minmax_fp32_wasmsimd_params;
qs8_gavgpool_config.row_tile = 7;
qs8_gavgpool_config.channel_tile = 16;
#elif XNN_ARCH_WASM
qs8_gavgpool_config.unipass = (xnn_gavgpool_unipass_ukernel_fn) xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c4;
qs8_gavgpool_config.multipass = (xnn_gavgpool_multipass_ukernel_fn) xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c4;
qs8_gavgpool_config.init.qs8 = xnn_init_qs8_avgpool_minmax_fp32_scalar_imagic_params;
qs8_gavgpool_config.update.qs8 = xnn_update_qs8_avgpool_minmax_fp32_scalar_imagic_params;
qs8_gavgpool_config.row_tile = 7;
qs8_gavgpool_config.channel_tile = 4;
#elif XNN_ARCH_RISCV
qs8_gavgpool_config.unipass = (xnn_gavgpool_unipass_ukernel_fn) xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c1;
qs8_gavgpool_config.multipass = (xnn_gavgpool_multipass_ukernel_fn) xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c1;
qs8_gavgpool_config.init.qs8 = xnn_init_qs8_avgpool_minmax_fp32_scalar_imagic_params;
qs8_gavgpool_config.update.qs8 = xnn_update_qs8_avgpool_minmax_fp32_scalar_imagic_params;
qs8_gavgpool_config.row_tile = 7;
qs8_gavgpool_config.channel_tile = 1;
#endif
}
static void init_qu8_gavgpool_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
qu8_gavgpool_config.unipass = (xnn_gavgpool_unipass_ukernel_fn) xnn_qu8_gavgpool_minmax_rndnu_ukernel_7x__neon_c8;
qu8_gavgpool_config.multipass = (xnn_gavgpool_multipass_ukernel_fn) xnn_qu8_gavgpool_minmax_rndnu_ukernel_7p7x__neon_c8;
qu8_gavgpool_config.init.qu8 = xnn_init_qu8_avgpool_minmax_rndnu_neon_params;
qu8_gavgpool_config.update.qu8 = xnn_update_qu8_avgpool_minmax_rndnu_neon_params;
qu8_gavgpool_config.row_tile = 7;
qu8_gavgpool_config.channel_tile = 8;
} else if (!XNN_PLATFORM_MOBILE) {
qu8_gavgpool_config.unipass = (xnn_gavgpool_unipass_ukernel_fn) xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c1;
qu8_gavgpool_config.multipass = (xnn_gavgpool_multipass_ukernel_fn) xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c1;
qu8_gavgpool_config.init.qu8 = xnn_init_qu8_avgpool_minmax_fp32_scalar_imagic_params;
qu8_gavgpool_config.update.qu8 = xnn_update_qu8_avgpool_minmax_fp32_scalar_imagic_params;
qu8_gavgpool_config.row_tile = 7;
qu8_gavgpool_config.channel_tile = 1;
}
#elif XNN_ARCH_ARM64
qu8_gavgpool_config.unipass = (xnn_gavgpool_unipass_ukernel_fn) xnn_qu8_gavgpool_minmax_rndnu_ukernel_7x__neon_c8;
qu8_gavgpool_config.multipass = (xnn_gavgpool_multipass_ukernel_fn) xnn_qu8_gavgpool_minmax_rndnu_ukernel_7p7x__neon_c8;
qu8_gavgpool_config.init.qu8 = xnn_init_qu8_avgpool_minmax_rndnu_neon_params;
qu8_gavgpool_config.update.qu8 = xnn_update_qu8_avgpool_minmax_rndnu_neon_params;
qu8_gavgpool_config.row_tile = 7;
qu8_gavgpool_config.channel_tile = 8;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_x86_sse4_1) {
qu8_gavgpool_config.unipass = (xnn_gavgpool_unipass_ukernel_fn) xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__sse41_c8;
qu8_gavgpool_config.multipass = (xnn_gavgpool_multipass_ukernel_fn) xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__sse41_c8;
qu8_gavgpool_config.init.qu8 = xnn_init_qu8_avgpool_minmax_fp32_sse4_params;
qu8_gavgpool_config.update.qu8 = xnn_update_qu8_avgpool_minmax_fp32_sse4_params;
qu8_gavgpool_config.row_tile = 7;
qu8_gavgpool_config.channel_tile = 8;
} else {
qu8_gavgpool_config.unipass = (xnn_gavgpool_unipass_ukernel_fn) xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__sse2_c8;
qu8_gavgpool_config.multipass = (xnn_gavgpool_multipass_ukernel_fn) xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__sse2_c8;
qu8_gavgpool_config.init.qu8 = xnn_init_qu8_avgpool_minmax_fp32_sse2_params;
qu8_gavgpool_config.update.qu8 = xnn_update_qu8_avgpool_minmax_fp32_sse2_params;
qu8_gavgpool_config.row_tile = 7;
qu8_gavgpool_config.channel_tile = 8;
}
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
qu8_gavgpool_config.unipass = (xnn_gavgpool_unipass_ukernel_fn) xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__wasmsimd_c16;
qu8_gavgpool_config.multipass = (xnn_gavgpool_multipass_ukernel_fn) xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__wasmsimd_c16;
qu8_gavgpool_config.init.qu8 = xnn_init_qu8_avgpool_minmax_fp32_wasmsimd_params;
qu8_gavgpool_config.update.qu8 = xnn_update_qu8_avgpool_minmax_fp32_wasmsimd_params;
qu8_gavgpool_config.row_tile = 7;
qu8_gavgpool_config.channel_tile = 16;
#elif XNN_ARCH_WASM
qu8_gavgpool_config.unipass = (xnn_gavgpool_unipass_ukernel_fn) xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c4;
qu8_gavgpool_config.multipass = (xnn_gavgpool_multipass_ukernel_fn) xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c4;
qu8_gavgpool_config.init.qu8 = xnn_init_qu8_avgpool_minmax_fp32_scalar_imagic_params;
qu8_gavgpool_config.update.qu8 = xnn_update_qu8_avgpool_minmax_fp32_scalar_imagic_params;
qu8_gavgpool_config.row_tile = 7;
qu8_gavgpool_config.channel_tile = 4;
#elif XNN_ARCH_RISCV
qu8_gavgpool_config.unipass = (xnn_gavgpool_unipass_ukernel_fn) xnn_qu8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c1;
qu8_gavgpool_config.multipass = (xnn_gavgpool_multipass_ukernel_fn) xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c1;
qu8_gavgpool_config.init.qu8 = xnn_init_qu8_avgpool_minmax_fp32_scalar_imagic_params;
qu8_gavgpool_config.update.qu8 = xnn_update_qu8_avgpool_minmax_fp32_scalar_imagic_params;
qu8_gavgpool_config.row_tile = 7;
qu8_gavgpool_config.channel_tile = 1;
#endif
}
#if XNN_PLATFORM_WINDOWS
static BOOL CALLBACK init_f16_gavgpool_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f16_gavgpool_config();
return TRUE;
}
static BOOL CALLBACK init_f32_gavgpool_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f32_gavgpool_config();
return TRUE;
}
static BOOL CALLBACK init_qs8_gavgpool_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_qs8_gavgpool_config();
return TRUE;
}
static BOOL CALLBACK init_qu8_gavgpool_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_qu8_gavgpool_config();
return TRUE;
}
#endif
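// Public accessors for the global-average-pooling configs. Each one validates the
// hardware configuration, runs the matching init_*_gavgpool_config routine at most
// once (InitOnceExecuteOnce on Windows, pthread_once elsewhere), and returns a
// pointer to the lazily initialized static config.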
const struct xnn_gavgpool_config* xnn_init_f16_gavgpool_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL || !xnn_is_f16_compatible_config(hardware_config)) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f16_gavgpool, &init_f16_gavgpool_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f16_gavgpool, &init_f16_gavgpool_config);
#endif
return &f16_gavgpool_config;
}
const struct xnn_gavgpool_config* xnn_init_f32_gavgpool_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f32_gavgpool, &init_f32_gavgpool_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f32_gavgpool, &init_f32_gavgpool_config);
#endif
return &f32_gavgpool_config;
}
const struct xnn_gavgpool_config* xnn_init_qs8_gavgpool_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_qs8_gavgpool, &init_qs8_gavgpool_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_qs8_gavgpool, &init_qs8_gavgpool_config);
#endif
return &qs8_gavgpool_config;
}
const struct xnn_gavgpool_config* xnn_init_qu8_gavgpool_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_qu8_gavgpool, &init_qu8_gavgpool_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_qu8_gavgpool, &init_qu8_gavgpool_config);
#endif
return &qu8_gavgpool_config;
}
| 19,906 | 55.553977 | 134 | c |
| XNNPACK | XNNPACK-master/src/gavgpool-cw-config.c |
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <pthread.h>
#endif
#include <xnnpack/common.h>
#include <xnnpack/config.h>
#include <xnnpack/microparams-init.h>
#include <xnnpack/gavgpool.h>
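// Runtime-selected configs for the CW global-average-pooling microkernels: one
// static config per data type, filled in at most once by the init routines below
// and handed out through the public accessors at the end of this file.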
static struct xnn_gavgpool_cw_config f16_gavgpool_cw_config = {0};
static struct xnn_gavgpool_cw_config f32_gavgpool_cw_config = {0};
#if XNN_PLATFORM_WINDOWS
static INIT_ONCE init_guard_f16_gavgpool_cw = INIT_ONCE_STATIC_INIT;
static INIT_ONCE init_guard_f32_gavgpool_cw = INIT_ONCE_STATIC_INIT;
#else
static pthread_once_t init_guard_f16_gavgpool_cw = PTHREAD_ONCE_INIT;
static pthread_once_t init_guard_f32_gavgpool_cw = PTHREAD_ONCE_INIT;
#endif
static void init_f16_gavgpool_cw_config(void) {
#if XNN_ARCH_ARM && XNN_ENABLE_ARM_FP16_VECTOR && XNN_ENABLE_ARM_FP16_SCALAR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon_fp16_arith) {
f16_gavgpool_cw_config.ukernel = (xnn_gavgpool_cw_ukernel_fn) xnn_f16_gavgpool_cw_ukernel__neonfp16arith_x8;
f16_gavgpool_cw_config.init.f16 = xnn_init_f16_gavgpool_neonfp16arith_params;
f16_gavgpool_cw_config.update.f16 = xnn_update_f16_gavgpool_neonfp16arith_params;
f16_gavgpool_cw_config.channel_tile = 8;
}
#elif XNN_ARCH_ARM64 && XNN_ENABLE_ARM_FP16_VECTOR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon_fp16_arith) {
f16_gavgpool_cw_config.ukernel = (xnn_gavgpool_cw_ukernel_fn) xnn_f16_gavgpool_cw_ukernel__neonfp16arith_x8;
f16_gavgpool_cw_config.init.f16 = xnn_init_f16_gavgpool_neonfp16arith_params;
f16_gavgpool_cw_config.update.f16 = xnn_update_f16_gavgpool_neonfp16arith_params;
f16_gavgpool_cw_config.channel_tile = 8;
}
#endif
}
static void init_f32_gavgpool_cw_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
f32_gavgpool_cw_config.ukernel = (xnn_gavgpool_cw_ukernel_fn) xnn_f32_gavgpool_cw_ukernel__neon_x4;
f32_gavgpool_cw_config.channel_tile = 4;
} else if (!XNN_PLATFORM_MOBILE) {
f32_gavgpool_cw_config.ukernel = (xnn_gavgpool_cw_ukernel_fn) xnn_f32_gavgpool_cw_ukernel__scalar_x1;
f32_gavgpool_cw_config.channel_tile = 1;
}
#elif XNN_ARCH_ARM64
f32_gavgpool_cw_config.ukernel = (xnn_gavgpool_cw_ukernel_fn) xnn_f32_gavgpool_cw_ukernel__neon_x4;
f32_gavgpool_cw_config.channel_tile = 4;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
f32_gavgpool_cw_config.ukernel = (xnn_gavgpool_cw_ukernel_fn) xnn_f32_gavgpool_cw_ukernel__sse_x4;
f32_gavgpool_cw_config.channel_tile = 4;
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->is_x86) {
f32_gavgpool_cw_config.ukernel = (xnn_gavgpool_cw_ukernel_fn) xnn_f32_gavgpool_cw_ukernel__wasmsimd_x86_x4;
f32_gavgpool_cw_config.channel_tile = 4;
} else {
f32_gavgpool_cw_config.ukernel = (xnn_gavgpool_cw_ukernel_fn) xnn_f32_gavgpool_cw_ukernel__wasmsimd_arm_x4;
f32_gavgpool_cw_config.channel_tile = 4;
}
#elif XNN_ARCH_WASM
f32_gavgpool_cw_config.ukernel = (xnn_gavgpool_cw_ukernel_fn) xnn_f32_gavgpool_cw_ukernel__scalar_x1;
f32_gavgpool_cw_config.channel_tile = 1;
#elif XNN_ARCH_RISCV
f32_gavgpool_cw_config.ukernel = (xnn_gavgpool_cw_ukernel_fn) xnn_f32_gavgpool_cw_ukernel__scalar_x1;
f32_gavgpool_cw_config.channel_tile = 1;
#endif
}
#if XNN_PLATFORM_WINDOWS
static BOOL CALLBACK init_f16_gavgpool_cw_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f16_gavgpool_cw_config();
return TRUE;
}
static BOOL CALLBACK init_f32_gavgpool_cw_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f32_gavgpool_cw_config();
return TRUE;
}
#endif
const struct xnn_gavgpool_cw_config* xnn_init_f16_gavgpool_cw_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL || !xnn_is_f16_chw_compatible_config(hardware_config)) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f16_gavgpool_cw, &init_f16_gavgpool_cw_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f16_gavgpool_cw, &init_f16_gavgpool_cw_config);
#endif
return &f16_gavgpool_cw_config;
}
const struct xnn_gavgpool_cw_config* xnn_init_f32_gavgpool_cw_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL || !xnn_is_chw_compatible_config(hardware_config)) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f32_gavgpool_cw, &init_f32_gavgpool_cw_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f32_gavgpool_cw, &init_f32_gavgpool_cw_config);
#endif
return &f32_gavgpool_cw_config;
}
| 5,359 | 41.204724 | 115 | c |
| XNNPACK | XNNPACK-master/src/ibilinear-chw-config.c |
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <pthread.h>
#endif
#include <xnnpack/common.h>
#include <xnnpack/config.h>
#include <xnnpack/microparams-init.h>
#include <xnnpack/ibilinear.h>
static struct xnn_ibilinear_chw_config f16_ibilinear_chw_config = {0};
static struct xnn_ibilinear_chw_config f32_ibilinear_chw_config = {0};
#if XNN_PLATFORM_WINDOWS
static INIT_ONCE init_guard_f16_ibilinear_chw = INIT_ONCE_STATIC_INIT;
static INIT_ONCE init_guard_f32_ibilinear_chw = INIT_ONCE_STATIC_INIT;
#else
static pthread_once_t init_guard_f16_ibilinear_chw = PTHREAD_ONCE_INIT;
static pthread_once_t init_guard_f32_ibilinear_chw = PTHREAD_ONCE_INIT;
#endif
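// Each init routine below selects a CHW bilinear-interpolation (ibilinear) microkernel
// for the current architecture; the _pN suffix of the chosen kernel matches the
// pixel_tile recorded in the config (e.g. *_p8 pairs with pixel_tile = 8).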
static void init_f16_ibilinear_chw_config(void) {
#if XNN_ARCH_ARM && XNN_ENABLE_ARM_FP16_VECTOR && XNN_ENABLE_ARM_FP16_SCALAR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon_fp16_arith) {
f16_ibilinear_chw_config.ukernel = (xnn_ibilinear_chw_ukernel_fn) xnn_f16_ibilinear_chw_ukernel__neonfp16arith_p8;
f16_ibilinear_chw_config.channel_tile = 1;
f16_ibilinear_chw_config.pixel_tile = 8;
}
#elif XNN_ARCH_ARM64 && XNN_ENABLE_ARM_FP16_VECTOR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon_fp16_arith) {
f16_ibilinear_chw_config.ukernel = (xnn_ibilinear_chw_ukernel_fn) xnn_f16_ibilinear_chw_ukernel__neonfp16arith_p8;
f16_ibilinear_chw_config.channel_tile = 1;
f16_ibilinear_chw_config.pixel_tile = 8;
}
#endif
}
static void init_f32_ibilinear_chw_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
f32_ibilinear_chw_config.ukernel = (xnn_ibilinear_chw_ukernel_fn) xnn_f32_ibilinear_chw_ukernel__neon_p8;
f32_ibilinear_chw_config.channel_tile = 1;
f32_ibilinear_chw_config.pixel_tile = 8;
} else if (!XNN_PLATFORM_MOBILE) {
f32_ibilinear_chw_config.ukernel = (xnn_ibilinear_chw_ukernel_fn) xnn_f32_ibilinear_chw_ukernel__scalar_p4;
f32_ibilinear_chw_config.channel_tile = 1;
f32_ibilinear_chw_config.pixel_tile = 4;
}
#elif XNN_ARCH_ARM64
f32_ibilinear_chw_config.ukernel = (xnn_ibilinear_chw_ukernel_fn) xnn_f32_ibilinear_chw_ukernel__neonfma_p8;
f32_ibilinear_chw_config.channel_tile = 1;
f32_ibilinear_chw_config.pixel_tile = 8;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
f32_ibilinear_chw_config.ukernel = (xnn_ibilinear_chw_ukernel_fn) xnn_f32_ibilinear_chw_ukernel__sse_p8;
f32_ibilinear_chw_config.channel_tile = 1;
f32_ibilinear_chw_config.pixel_tile = 8;
#elif XNN_ARCH_WASMRELAXEDSIMD || XNN_ARCH_WASMSIMD
f32_ibilinear_chw_config.ukernel = (xnn_ibilinear_chw_ukernel_fn) xnn_f32_ibilinear_chw_ukernel__wasmsimd_p8;
f32_ibilinear_chw_config.channel_tile = 1;
f32_ibilinear_chw_config.pixel_tile = 8;
#elif XNN_ARCH_WASM
f32_ibilinear_chw_config.ukernel = (xnn_ibilinear_chw_ukernel_fn) xnn_f32_ibilinear_chw_ukernel__scalar_p4;
f32_ibilinear_chw_config.channel_tile = 1;
f32_ibilinear_chw_config.pixel_tile = 4;
#elif XNN_ARCH_RISCV
f32_ibilinear_chw_config.ukernel = (xnn_ibilinear_chw_ukernel_fn) xnn_f32_ibilinear_chw_ukernel__scalar_p4;
f32_ibilinear_chw_config.channel_tile = 1;
f32_ibilinear_chw_config.pixel_tile = 4;
#endif
}
#if XNN_PLATFORM_WINDOWS
static BOOL CALLBACK init_f16_ibilinear_chw_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f16_ibilinear_chw_config();
return TRUE;
}
static BOOL CALLBACK init_f32_ibilinear_chw_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f32_ibilinear_chw_config();
return TRUE;
}
#endif
const struct xnn_ibilinear_chw_config* xnn_init_f16_ibilinear_chw_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL || !xnn_is_f16_chw_compatible_config(hardware_config)) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f16_ibilinear_chw, &init_f16_ibilinear_chw_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f16_ibilinear_chw, &init_f16_ibilinear_chw_config);
#endif
return &f16_ibilinear_chw_config;
}
const struct xnn_ibilinear_chw_config* xnn_init_f32_ibilinear_chw_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL || !xnn_is_chw_compatible_config(hardware_config)) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f32_ibilinear_chw, &init_f32_ibilinear_chw_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f32_ibilinear_chw, &init_f32_ibilinear_chw_config);
#endif
return &f32_ibilinear_chw_config;
}
| 5,216 | 40.736 | 120 | c |
| XNNPACK | XNNPACK-master/src/ibilinear-config.c |
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <pthread.h>
#endif
#include <xnnpack/common.h>
#include <xnnpack/config.h>
#include <xnnpack/microparams-init.h>
#include <xnnpack/ibilinear.h>
static struct xnn_ibilinear_config f16_ibilinear_config = {0};
static struct xnn_ibilinear_config f32_ibilinear_config = {0};
static struct xnn_ibilinear_config s8_ibilinear_config = {0};
static struct xnn_ibilinear_config u8_ibilinear_config = {0};
#if XNN_PLATFORM_WINDOWS
static INIT_ONCE init_guard_f16_ibilinear = INIT_ONCE_STATIC_INIT;
static INIT_ONCE init_guard_f32_ibilinear = INIT_ONCE_STATIC_INIT;
static INIT_ONCE init_guard_s8_ibilinear = INIT_ONCE_STATIC_INIT;
static INIT_ONCE init_guard_u8_ibilinear = INIT_ONCE_STATIC_INIT;
#else
static pthread_once_t init_guard_f16_ibilinear = PTHREAD_ONCE_INIT;
static pthread_once_t init_guard_f32_ibilinear = PTHREAD_ONCE_INIT;
static pthread_once_t init_guard_s8_ibilinear = PTHREAD_ONCE_INIT;
static pthread_once_t init_guard_u8_ibilinear = PTHREAD_ONCE_INIT;
#endif
static void init_f16_ibilinear_config(void) {
#if XNN_ARCH_ARM && XNN_ENABLE_ARM_FP16_VECTOR && XNN_ENABLE_ARM_FP16_SCALAR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon_fp16_arith) {
f16_ibilinear_config.ukernel = (xnn_ibilinear_ukernel_fn) xnn_f16_ibilinear_ukernel__neonfp16arith_c8;
f16_ibilinear_config.pixel_tile = 1;
f16_ibilinear_config.channel_tile = 8;
}
#elif XNN_ARCH_ARM64 && XNN_ENABLE_ARM_FP16_VECTOR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon_fp16_arith) {
f16_ibilinear_config.ukernel = (xnn_ibilinear_ukernel_fn) xnn_f16_ibilinear_ukernel__neonfp16arith_c8;
f16_ibilinear_config.pixel_tile = 1;
f16_ibilinear_config.channel_tile = 8;
}
#elif (XNN_ARCH_X86 || XNN_ARCH_X86_64) && !XNN_PLATFORM_MOBILE
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_x86_avx2) {
f16_ibilinear_config.ukernel = (xnn_ibilinear_ukernel_fn) xnn_f16_ibilinear_ukernel__fma3_c8;
f16_ibilinear_config.pixel_tile = 1;
f16_ibilinear_config.channel_tile = 8;
}
#endif
}
static void init_f32_ibilinear_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
f32_ibilinear_config.ukernel = (xnn_ibilinear_ukernel_fn) xnn_f32_ibilinear_ukernel__neon_c8;
f32_ibilinear_config.pixel_tile = 1;
f32_ibilinear_config.channel_tile = 8;
} else if (!XNN_PLATFORM_MOBILE) {
f32_ibilinear_config.ukernel = (xnn_ibilinear_ukernel_fn) xnn_f32_ibilinear_ukernel__scalar_c2;
f32_ibilinear_config.pixel_tile = 1;
f32_ibilinear_config.channel_tile = 2;
}
#elif XNN_ARCH_ARM64
f32_ibilinear_config.ukernel = (xnn_ibilinear_ukernel_fn) xnn_f32_ibilinear_ukernel__neonfma_c8;
f32_ibilinear_config.pixel_tile = 1;
f32_ibilinear_config.channel_tile = 8;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
f32_ibilinear_config.ukernel = (xnn_ibilinear_ukernel_fn) xnn_f32_ibilinear_ukernel__sse_c8;
f32_ibilinear_config.pixel_tile = 1;
f32_ibilinear_config.channel_tile = 8;
#elif XNN_ARCH_WASMRELAXEDSIMD
f32_ibilinear_config.ukernel = (xnn_ibilinear_ukernel_fn) xnn_f32_ibilinear_ukernel__wasmrelaxedsimd_c8;
f32_ibilinear_config.pixel_tile = 1;
f32_ibilinear_config.channel_tile = 8;
#elif XNN_ARCH_WASMSIMD
f32_ibilinear_config.ukernel = (xnn_ibilinear_ukernel_fn) xnn_f32_ibilinear_ukernel__wasmsimd_c8;
f32_ibilinear_config.pixel_tile = 1;
f32_ibilinear_config.channel_tile = 8;
#elif XNN_ARCH_WASM
f32_ibilinear_config.ukernel = (xnn_ibilinear_ukernel_fn) xnn_f32_ibilinear_ukernel__scalar_c2;
f32_ibilinear_config.pixel_tile = 1;
f32_ibilinear_config.channel_tile = 2;
#elif XNN_ARCH_RISCV
f32_ibilinear_config.ukernel = (xnn_ibilinear_ukernel_fn) xnn_f32_ibilinear_ukernel__scalar_c2;
f32_ibilinear_config.pixel_tile = 1;
f32_ibilinear_config.channel_tile = 2;
#endif
}
static void init_s8_ibilinear_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
s8_ibilinear_config.ukernel = (xnn_ibilinear_ukernel_fn) xnn_s8_ibilinear_ukernel__neon_c8;
s8_ibilinear_config.pixel_tile = 1;
s8_ibilinear_config.channel_tile = 8;
} else if (!XNN_PLATFORM_MOBILE) {
s8_ibilinear_config.ukernel = (xnn_ibilinear_ukernel_fn) xnn_s8_ibilinear_ukernel__scalar_c1;
s8_ibilinear_config.pixel_tile = 1;
s8_ibilinear_config.channel_tile = 1;
}
#elif XNN_ARCH_ARM64
s8_ibilinear_config.ukernel = (xnn_ibilinear_ukernel_fn) xnn_s8_ibilinear_ukernel__neon_c16;
s8_ibilinear_config.pixel_tile = 1;
s8_ibilinear_config.channel_tile = 16;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_x86_sse4_1) {
s8_ibilinear_config.ukernel = (xnn_ibilinear_ukernel_fn) xnn_s8_ibilinear_ukernel__sse41_c16;
s8_ibilinear_config.pixel_tile = 1;
s8_ibilinear_config.channel_tile = 16;
} else {
s8_ibilinear_config.ukernel = (xnn_ibilinear_ukernel_fn) xnn_s8_ibilinear_ukernel__sse2_c8;
s8_ibilinear_config.pixel_tile = 1;
s8_ibilinear_config.channel_tile = 8;
}
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
s8_ibilinear_config.ukernel = (xnn_ibilinear_ukernel_fn) xnn_s8_ibilinear_ukernel__wasmsimd_dot16x2_c8;
s8_ibilinear_config.pixel_tile = 1;
s8_ibilinear_config.channel_tile = 8;
#elif XNN_ARCH_WASM
s8_ibilinear_config.ukernel = (xnn_ibilinear_ukernel_fn) xnn_s8_ibilinear_ukernel__scalar_c1;
s8_ibilinear_config.pixel_tile = 1;
s8_ibilinear_config.channel_tile = 1;
#elif XNN_ARCH_RISCV
s8_ibilinear_config.ukernel = (xnn_ibilinear_ukernel_fn) xnn_s8_ibilinear_ukernel__scalar_c1;
s8_ibilinear_config.pixel_tile = 1;
s8_ibilinear_config.channel_tile = 1;
#endif
}
static void init_u8_ibilinear_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
u8_ibilinear_config.ukernel = (xnn_ibilinear_ukernel_fn) xnn_u8_ibilinear_ukernel__neon_c8;
u8_ibilinear_config.pixel_tile = 1;
u8_ibilinear_config.channel_tile = 8;
} else if (!XNN_PLATFORM_MOBILE) {
u8_ibilinear_config.ukernel = (xnn_ibilinear_ukernel_fn) xnn_u8_ibilinear_ukernel__scalar_c1;
u8_ibilinear_config.pixel_tile = 1;
u8_ibilinear_config.channel_tile = 1;
}
#elif XNN_ARCH_ARM64
u8_ibilinear_config.ukernel = (xnn_ibilinear_ukernel_fn) xnn_u8_ibilinear_ukernel__neon_c16;
u8_ibilinear_config.pixel_tile = 1;
u8_ibilinear_config.channel_tile = 16;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_x86_sse4_1) {
u8_ibilinear_config.ukernel = (xnn_ibilinear_ukernel_fn) xnn_u8_ibilinear_ukernel__sse41_c16;
u8_ibilinear_config.pixel_tile = 1;
u8_ibilinear_config.channel_tile = 16;
} else {
u8_ibilinear_config.ukernel = (xnn_ibilinear_ukernel_fn) xnn_u8_ibilinear_ukernel__sse2_c8;
u8_ibilinear_config.pixel_tile = 1;
u8_ibilinear_config.channel_tile = 8;
}
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
u8_ibilinear_config.ukernel = (xnn_ibilinear_ukernel_fn) xnn_u8_ibilinear_ukernel__wasmsimd_dot16x2_c8;
u8_ibilinear_config.pixel_tile = 1;
u8_ibilinear_config.channel_tile = 8;
#elif XNN_ARCH_WASM
u8_ibilinear_config.ukernel = (xnn_ibilinear_ukernel_fn) xnn_u8_ibilinear_ukernel__scalar_c1;
u8_ibilinear_config.pixel_tile = 1;
u8_ibilinear_config.channel_tile = 1;
#elif XNN_ARCH_RISCV
u8_ibilinear_config.ukernel = (xnn_ibilinear_ukernel_fn) xnn_u8_ibilinear_ukernel__scalar_c1;
u8_ibilinear_config.pixel_tile = 1;
u8_ibilinear_config.channel_tile = 1;
#endif
}
#if XNN_PLATFORM_WINDOWS
static BOOL CALLBACK init_f16_ibilinear_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f16_ibilinear_config();
return TRUE;
}
static BOOL CALLBACK init_f32_ibilinear_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f32_ibilinear_config();
return TRUE;
}
static BOOL CALLBACK init_s8_ibilinear_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_s8_ibilinear_config();
return TRUE;
}
static BOOL CALLBACK init_u8_ibilinear_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_u8_ibilinear_config();
return TRUE;
}
#endif
const struct xnn_ibilinear_config* xnn_init_f16_ibilinear_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL || !xnn_is_f16_compatible_config(hardware_config)) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f16_ibilinear, &init_f16_ibilinear_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f16_ibilinear, &init_f16_ibilinear_config);
#endif
return &f16_ibilinear_config;
}
const struct xnn_ibilinear_config* xnn_init_f32_ibilinear_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f32_ibilinear, &init_f32_ibilinear_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f32_ibilinear, &init_f32_ibilinear_config);
#endif
return &f32_ibilinear_config;
}
const struct xnn_ibilinear_config* xnn_init_s8_ibilinear_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_s8_ibilinear, &init_s8_ibilinear_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_s8_ibilinear, &init_s8_ibilinear_config);
#endif
return &s8_ibilinear_config;
}
const struct xnn_ibilinear_config* xnn_init_u8_ibilinear_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_u8_ibilinear, &init_u8_ibilinear_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_u8_ibilinear, &init_u8_ibilinear_config);
#endif
return &u8_ibilinear_config;
}
| 11,322 | 41.40824 | 113 | c |
| XNNPACK | XNNPACK-master/src/im2col.c |
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <xnnpack/im2col.h>
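// Materializes convolution input patches ("im2col"): for every output pixel and kernel
// tap, either copies group_input_channels_in_bytes bytes from the corresponding padded
// input position or zero-fills the slot when the tap falls outside the valid range.
// The padded coordinates are computed with unsigned arithmetic, so positions above or
// to the left of the input wrap around to large values and fail the same bounds check;
// a whole kernel row is zero-filled at once when input_y is out of range.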
void xnn_im2col_conv2d(
size_t output_height,
size_t output_width,
size_t kernel_height,
size_t kernel_width,
size_t subsampling_height,
size_t subsampling_width,
size_t dilation_height,
size_t dilation_width,
size_t input_width,
size_t input_padding_top,
size_t input_padding_left,
size_t group_input_channels_in_bytes,
size_t input_pixel_stride_in_bytes,
const void* input,
void* output)
{
for (size_t output_y = 0; output_y < output_height; output_y++) {
for (size_t output_x = 0; output_x < output_width; output_x++) {
for (size_t kernel_y = 0; kernel_y < kernel_height; kernel_y++) {
const size_t input_y = output_y * subsampling_height + kernel_y * dilation_height - input_padding_top;
if (input_y < output_height) {
for (size_t kernel_x = 0; kernel_x < kernel_width; kernel_x++) {
const size_t input_x = output_x * subsampling_width + kernel_x * dilation_width - input_padding_left;
if (input_x < output_width) {
memcpy(output,
(const void*) ((uintptr_t) input + (input_y * input_width + input_x) * input_pixel_stride_in_bytes),
group_input_channels_in_bytes);
} else {
memset(output, 0, group_input_channels_in_bytes);
}
output = (void*) ((uintptr_t) output + group_input_channels_in_bytes);
}
} else {
memset(output, 0, kernel_width * group_input_channels_in_bytes);
output = (void*) ((uintptr_t) output + kernel_width * group_input_channels_in_bytes);
}
}
}
}
}
| 1,874 | 33.722222 | 116 | c |
| XNNPACK | XNNPACK-master/src/init.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <errno.h>
#include <pthread.h>
#include <sys/mman.h>
#include <unistd.h>
#endif
#ifdef _MSC_VER
#include <intrin.h>
#endif
#include <xnnpack.h>
#include <xnnpack/allocator.h>
#include <xnnpack/common.h>
#include <xnnpack/config.h>
#include <xnnpack/log.h>
#include <xnnpack/params.h>
#if XNN_PLATFORM_WINDOWS
static INIT_ONCE init_guard = INIT_ONCE_STATIC_INIT;
#else
static pthread_once_t init_guard = PTHREAD_ONCE_INIT;
#endif
static const struct xnn_allocator* volatile init_allocator = NULL;
static void init(void) {
uint32_t init_flags = XNN_INIT_FLAG_XNNPACK;
memcpy(&xnn_params.allocator, init_allocator, sizeof(struct xnn_allocator));
xnn_params.init_flags = init_flags;
}
#if XNN_PLATFORM_WINDOWS
static BOOL CALLBACK init_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init();
return TRUE;
}
#endif
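// Records the caller-provided allocator exactly once (falling back to the default
// allocator), then runs the one-time global initialization behind the platform's
// once-guard. Returns xnn_status_unsupported_hardware if the hardware config is
// unavailable or initialization did not set XNN_INIT_FLAG_XNNPACK.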
enum xnn_status xnn_initialize(const struct xnn_allocator* allocator) {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
xnn_log_error("XNNPACK initialization failed: hardware not supported");
return xnn_status_unsupported_hardware;
}
if (allocator == NULL) {
allocator = &xnn_default_allocator;
}
#ifdef _MSC_VER
_InterlockedCompareExchangePointer((PVOID volatile*) &init_allocator, (PVOID) allocator, NULL);
#else
__sync_bool_compare_and_swap(&init_allocator, NULL, allocator);
#endif
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard, &init_windows, NULL, NULL);
#else
pthread_once(&init_guard, &init);
#endif
if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) != 0) {
return xnn_status_success;
} else {
return xnn_status_unsupported_hardware;
}
}
enum xnn_status xnn_deinitialize(void) {
return xnn_status_success;
}
| 2,205 | 24.651163 | 99 | c |
| XNNPACK | XNNPACK-master/src/log.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdarg.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <unistd.h>
#endif
#if defined(__ANDROID__)
#include <android/log.h>
#endif
#if defined(__hexagon__)
#include <qurt_printf.h>
#endif
#ifndef XNN_LOG_TO_STDIO
#if defined(__ANDROID__)
#define XNN_LOG_TO_STDIO 0
#else
#define XNN_LOG_TO_STDIO 1
#endif
#endif
#include <xnnpack/log.h>
/* Messages up to this size are formatted entirely on-stack, and don't allocate heap memory */
#define XNN_LOG_STACK_BUFFER_SIZE 1024
#ifdef _WIN32
#define XNN_LOG_NEWLINE_LENGTH 2
#define XNN_LOG_STDERR STD_ERROR_HANDLE
#define XNN_LOG_STDOUT STD_OUTPUT_HANDLE
#elif defined(__hexagon__)
#define XNN_LOG_NEWLINE_LENGTH 1
#define XNN_LOG_STDERR 0
#define XNN_LOG_STDOUT 0
#else
#define XNN_LOG_NEWLINE_LENGTH 1
#define XNN_LOG_STDERR STDERR_FILENO
#define XNN_LOG_STDOUT STDOUT_FILENO
#endif
#if XNN_LOG_TO_STDIO
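/* Formats "<prefix><message><newline>" into a stack buffer of XNN_LOG_STACK_BUFFER_SIZE
 * bytes, falling back to a heap allocation when the formatted message does not fit, and
 * then writes the result to the requested handle (WriteFile on Windows, qurt_printf on
 * Hexagon, write() elsewhere). */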
static void xnn_vlog(int output_handle, const char* prefix, size_t prefix_length, const char* format, va_list args) {
char stack_buffer[XNN_LOG_STACK_BUFFER_SIZE];
char* heap_buffer = NULL;
char* out_buffer = &stack_buffer[0];
  /* The first call to vsnprintf will clobber args, so keep a copy in case a second vsnprintf call is needed */
va_list args_copy;
va_copy(args_copy, args);
memcpy(stack_buffer, prefix, prefix_length * sizeof(char));
assert((prefix_length + XNN_LOG_NEWLINE_LENGTH) * sizeof(char) <= XNN_LOG_STACK_BUFFER_SIZE);
const int format_chars = vsnprintf(
&stack_buffer[prefix_length],
XNN_LOG_STACK_BUFFER_SIZE - (prefix_length + XNN_LOG_NEWLINE_LENGTH) * sizeof(char),
format,
args);
if (format_chars < 0) {
/* Format error in the message: silently ignore this particular message. */
goto cleanup;
}
const size_t format_length = (size_t) format_chars;
if ((prefix_length + format_length + XNN_LOG_NEWLINE_LENGTH) * sizeof(char) > XNN_LOG_STACK_BUFFER_SIZE) {
/* Allocate a buffer on heap, and vsnprintf to this buffer */
const size_t heap_buffer_size = (prefix_length + format_length + XNN_LOG_NEWLINE_LENGTH) * sizeof(char);
#if _WIN32
heap_buffer = HeapAlloc(GetProcessHeap(), 0, heap_buffer_size);
#else
heap_buffer = malloc(heap_buffer_size);
#endif
if (heap_buffer == NULL) {
goto cleanup;
}
/* Copy pre-formatted prefix into the on-heap buffer */
memcpy(heap_buffer, prefix, prefix_length * sizeof(char));
vsnprintf(&heap_buffer[prefix_length], (format_length + XNN_LOG_NEWLINE_LENGTH) * sizeof(char), format, args_copy);
out_buffer = heap_buffer;
}
#ifdef _WIN32
out_buffer[prefix_length + format_length] = '\r';
out_buffer[prefix_length + format_length + 1] = '\n';
DWORD bytes_written;
WriteFile(
GetStdHandle((DWORD) output_handle),
out_buffer, (prefix_length + format_length + XNN_LOG_NEWLINE_LENGTH) * sizeof(char),
&bytes_written, NULL);
#elif defined(__hexagon__)
qurt_printf("%s", out_buffer);
#else
out_buffer[prefix_length + format_length] = '\n';
ssize_t bytes_written = write(output_handle, out_buffer, (prefix_length + format_length + XNN_LOG_NEWLINE_LENGTH) * sizeof(char));
(void) bytes_written;
#endif
cleanup:
#ifdef _WIN32
HeapFree(GetProcessHeap(), 0, heap_buffer);
#else
free(heap_buffer);
#endif
va_end(args_copy);
}
#elif defined(__ANDROID__) && XNN_LOG_LEVEL > XNN_LOG_NONE
static const char xnnpack_module[] = "XNNPACK";
#endif
#if XNN_LOG_LEVEL >= XNN_LOG_DEBUG
void xnn_vlog_debug(const char* format, va_list args) {
#if XNN_LOG_TO_STDIO
static const char debug_prefix[17] = {
'D', 'e', 'b', 'u', 'g', ' ', '(', 'X', 'N', 'N', 'P', 'A', 'C', 'K', ')', ':', ' '
};
xnn_vlog(XNN_LOG_STDOUT, debug_prefix, 17, format, args);
#elif defined(__ANDROID__)
__android_log_vprint(ANDROID_LOG_DEBUG, xnnpack_module, format, args);
#else
#error "Platform-specific implementation required"
#endif
}
#endif
#if XNN_LOG_LEVEL >= XNN_LOG_INFO
void xnn_vlog_info(const char* format, va_list args) {
#if XNN_LOG_TO_STDIO
static const char info_prefix[16] = {
'N', 'o', 't', 'e', ' ', '(', 'X', 'N', 'N', 'P', 'A', 'C', 'K', ')', ':', ' '
};
xnn_vlog(XNN_LOG_STDOUT, info_prefix, 16, format, args);
#elif defined(__ANDROID__)
__android_log_vprint(ANDROID_LOG_INFO, xnnpack_module, format, args);
#else
#error "Platform-specific implementation required"
#endif
}
#endif
#if XNN_LOG_LEVEL >= XNN_LOG_WARNING
void xnn_vlog_warning(const char* format, va_list args) {
#if XNN_LOG_TO_STDIO
static const char warning_prefix[20] = {
'W', 'a', 'r', 'n', 'i', 'n', 'g', ' ', 'i', 'n', ' ', 'X', 'N', 'N', 'P', 'A', 'C', 'K', ':', ' '
};
xnn_vlog(XNN_LOG_STDERR, warning_prefix, 20, format, args);
#elif defined(__ANDROID__)
__android_log_vprint(ANDROID_LOG_WARN, xnnpack_module, format, args);
#else
#error "Platform-specific implementation required"
#endif
}
#endif
#if XNN_LOG_LEVEL >= XNN_LOG_ERROR
void xnn_vlog_error(const char* format, va_list args) {
#if XNN_LOG_TO_STDIO
static const char error_prefix[18] = {
'E', 'r', 'r', 'o', 'r', ' ', 'i', 'n', ' ', 'X', 'N', 'N', 'P', 'A', 'C', 'K', ':', ' '
};
xnn_vlog(XNN_LOG_STDERR, error_prefix, 18, format, args);
#elif defined(__ANDROID__)
__android_log_vprint(ANDROID_LOG_ERROR, xnnpack_module, format, args);
#else
#error "Platform-specific implementation required"
#endif
}
#endif
#if XNN_LOG_LEVEL >= XNN_LOG_FATAL
void xnn_vlog_fatal(const char* format, va_list args) {
#if XNN_LOG_TO_STDIO
static const char fatal_prefix[24] = {
'F', 'a', 't', 'a', 'l', ' ', 'e', 'r', 'r', 'o', 'r', ' ', 'i', 'n', ' ', 'X', 'N', 'N', 'P', 'A', 'C', 'K', ':', ' '
};
xnn_vlog(XNN_LOG_STDERR, fatal_prefix, 24, format, args);
#elif defined(__ANDROID__)
__android_log_vprint(ANDROID_LOG_FATAL, xnnpack_module, format, args);
#else
#error "Platform-specific implementation required"
#endif
}
#endif
| 6,494 | 31.313433 | 134 | c |
| XNNPACK | XNNPACK-master/src/lut32norm-config.c |
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <pthread.h>
#endif
#include <xnnpack/common.h>
#include <xnnpack/config.h>
#include <xnnpack/microparams-init.h>
#include <xnnpack/lut.h>
static struct xnn_lut32norm_config u8_lut32norm_config = {0};
#if XNN_PLATFORM_WINDOWS
static INIT_ONCE init_guard_u8_lut32norm = INIT_ONCE_STATIC_INIT;
#else
static pthread_once_t init_guard_u8_lut32norm = PTHREAD_ONCE_INIT;
#endif
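// Every architecture branch below currently selects the same scalar implementation,
// xnn_u8_lut32norm_ukernel__scalar.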
static void init_u8_lut32norm_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
u8_lut32norm_config.lut32norm = xnn_u8_lut32norm_ukernel__scalar;
} else if (!XNN_PLATFORM_MOBILE) {
u8_lut32norm_config.lut32norm = xnn_u8_lut32norm_ukernel__scalar;
}
#elif XNN_ARCH_ARM64
u8_lut32norm_config.lut32norm = xnn_u8_lut32norm_ukernel__scalar;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
u8_lut32norm_config.lut32norm = xnn_u8_lut32norm_ukernel__scalar;
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
u8_lut32norm_config.lut32norm = xnn_u8_lut32norm_ukernel__scalar;
#elif XNN_ARCH_WASM
u8_lut32norm_config.lut32norm = xnn_u8_lut32norm_ukernel__scalar;
#elif XNN_ARCH_RISCV
u8_lut32norm_config.lut32norm = xnn_u8_lut32norm_ukernel__scalar;
#endif
}
#if XNN_PLATFORM_WINDOWS
static BOOL CALLBACK init_u8_lut32norm_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_u8_lut32norm_config();
return TRUE;
}
#endif
const struct xnn_lut32norm_config* xnn_init_u8_lut32norm_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_u8_lut32norm, &init_u8_lut32norm_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_u8_lut32norm, &init_u8_lut32norm_config);
#endif
return &u8_lut32norm_config;
}
| 2,228 | 30.842857 | 112 | c |
| XNNPACK | XNNPACK-master/src/maxpool-config.c |
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <pthread.h>
#endif
#include <xnnpack/common.h>
#include <xnnpack/config.h>
#include <xnnpack/microparams-init.h>
#include <xnnpack/maxpool.h>
static struct xnn_maxpool_config f16_maxpool_config = {0};
static struct xnn_maxpool_config f32_maxpool_config = {0};
static struct xnn_maxpool_config s8_maxpool_config = {0};
static struct xnn_maxpool_config u8_maxpool_config = {0};
#if XNN_PLATFORM_WINDOWS
static INIT_ONCE init_guard_f16_maxpool = INIT_ONCE_STATIC_INIT;
static INIT_ONCE init_guard_f32_maxpool = INIT_ONCE_STATIC_INIT;
static INIT_ONCE init_guard_s8_maxpool = INIT_ONCE_STATIC_INIT;
static INIT_ONCE init_guard_u8_maxpool = INIT_ONCE_STATIC_INIT;
#else
static pthread_once_t init_guard_f16_maxpool = PTHREAD_ONCE_INIT;
static pthread_once_t init_guard_f32_maxpool = PTHREAD_ONCE_INIT;
static pthread_once_t init_guard_s8_maxpool = PTHREAD_ONCE_INIT;
static pthread_once_t init_guard_u8_maxpool = PTHREAD_ONCE_INIT;
#endif
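// All max-pooling microkernels chosen below use a 9-element first pass followed by
// 8-element remainder passes (the "9p8x" naming), which is what
// first_pass_tile_size = 9 and remainder_pass_tile_size = 8 record.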
static void init_f16_maxpool_config(void) {
#if XNN_ARCH_ARM && XNN_ENABLE_ARM_FP16_VECTOR && XNN_ENABLE_ARM_FP16_SCALAR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon_fp16_arith) {
f16_maxpool_config.ukernel = (xnn_maxpool_ukernel_fn) xnn_f16_maxpool_minmax_ukernel_9p8x__neonfp16arith_c8;
f16_maxpool_config.init.f16 = xnn_init_f16_minmax_fp16arith_params;
f16_maxpool_config.first_pass_tile_size = 9;
f16_maxpool_config.remainder_pass_tile_size = 8;
}
#elif XNN_ARCH_ARM64 && XNN_ENABLE_ARM_FP16_VECTOR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon_fp16_arith) {
f16_maxpool_config.ukernel = (xnn_maxpool_ukernel_fn) xnn_f16_maxpool_minmax_ukernel_9p8x__neonfp16arith_c8;
f16_maxpool_config.init.f16 = xnn_init_f16_minmax_fp16arith_params;
f16_maxpool_config.first_pass_tile_size = 9;
f16_maxpool_config.remainder_pass_tile_size = 8;
}
#elif (XNN_ARCH_X86 || XNN_ARCH_X86_64) && !XNN_PLATFORM_MOBILE
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_x86_avx2) {
f16_maxpool_config.ukernel = (xnn_maxpool_ukernel_fn) xnn_f16_maxpool_minmax_ukernel_9p8x__f16c_c8;
f16_maxpool_config.init.f16 = xnn_init_f16_minmax_avx_params;
f16_maxpool_config.first_pass_tile_size = 9;
f16_maxpool_config.remainder_pass_tile_size = 8;
}
#endif
}
static void init_f32_maxpool_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
f32_maxpool_config.ukernel = (xnn_maxpool_ukernel_fn) xnn_f32_maxpool_minmax_ukernel_9p8x__neon_c4;
f32_maxpool_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_maxpool_config.first_pass_tile_size = 9;
f32_maxpool_config.remainder_pass_tile_size = 8;
} else if (!XNN_PLATFORM_MOBILE) {
f32_maxpool_config.ukernel = (xnn_maxpool_ukernel_fn) xnn_f32_maxpool_minmax_ukernel_9p8x__scalar_c1;
f32_maxpool_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_maxpool_config.first_pass_tile_size = 9;
f32_maxpool_config.remainder_pass_tile_size = 8;
}
#elif XNN_ARCH_ARM64
f32_maxpool_config.ukernel = (xnn_maxpool_ukernel_fn) xnn_f32_maxpool_minmax_ukernel_9p8x__neon_c4;
f32_maxpool_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_maxpool_config.first_pass_tile_size = 9;
f32_maxpool_config.remainder_pass_tile_size = 8;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
f32_maxpool_config.ukernel = (xnn_maxpool_ukernel_fn) xnn_f32_maxpool_minmax_ukernel_9p8x__sse_c4;
f32_maxpool_config.init.f32 = xnn_init_f32_minmax_sse_params;
f32_maxpool_config.first_pass_tile_size = 9;
f32_maxpool_config.remainder_pass_tile_size = 8;
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->is_x86) {
f32_maxpool_config.ukernel = (xnn_maxpool_ukernel_fn) xnn_f32_maxpool_minmax_ukernel_9p8x__wasmsimd_x86_c4;
f32_maxpool_config.init.f32 = xnn_init_f32_minmax_wasmsimd_params;
f32_maxpool_config.first_pass_tile_size = 9;
f32_maxpool_config.remainder_pass_tile_size = 8;
} else {
f32_maxpool_config.ukernel = (xnn_maxpool_ukernel_fn) xnn_f32_maxpool_minmax_ukernel_9p8x__wasmsimd_arm_c4;
f32_maxpool_config.init.f32 = xnn_init_f32_minmax_wasmsimd_params;
f32_maxpool_config.first_pass_tile_size = 9;
f32_maxpool_config.remainder_pass_tile_size = 8;
}
#elif XNN_ARCH_WASM
f32_maxpool_config.ukernel = (xnn_maxpool_ukernel_fn) xnn_f32_maxpool_minmax_ukernel_9p8x__wasm_c1;
f32_maxpool_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_maxpool_config.first_pass_tile_size = 9;
f32_maxpool_config.remainder_pass_tile_size = 8;
#elif XNN_ARCH_RISCV
f32_maxpool_config.ukernel = (xnn_maxpool_ukernel_fn) xnn_f32_maxpool_minmax_ukernel_9p8x__scalar_c1;
f32_maxpool_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_maxpool_config.first_pass_tile_size = 9;
f32_maxpool_config.remainder_pass_tile_size = 8;
#endif
}
static void init_s8_maxpool_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
s8_maxpool_config.ukernel = (xnn_maxpool_ukernel_fn) xnn_s8_maxpool_minmax_ukernel_9p8x__neon_c16;
s8_maxpool_config.init.s8 = xnn_init_s8_minmax_neon_params;
s8_maxpool_config.first_pass_tile_size = 9;
s8_maxpool_config.remainder_pass_tile_size = 8;
} else if (!XNN_PLATFORM_MOBILE) {
s8_maxpool_config.ukernel = (xnn_maxpool_ukernel_fn) xnn_s8_maxpool_minmax_ukernel_9p8x__scalar_c1;
s8_maxpool_config.init.s8 = xnn_init_s8_minmax_scalar_params;
s8_maxpool_config.first_pass_tile_size = 9;
s8_maxpool_config.remainder_pass_tile_size = 8;
}
#elif XNN_ARCH_ARM64
s8_maxpool_config.ukernel = (xnn_maxpool_ukernel_fn) xnn_s8_maxpool_minmax_ukernel_9p8x__neon_c16;
s8_maxpool_config.init.s8 = xnn_init_s8_minmax_neon_params;
s8_maxpool_config.first_pass_tile_size = 9;
s8_maxpool_config.remainder_pass_tile_size = 8;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_x86_sse4_1) {
s8_maxpool_config.ukernel = (xnn_maxpool_ukernel_fn) xnn_s8_maxpool_minmax_ukernel_9p8x__sse41_c16;
s8_maxpool_config.init.s8 = xnn_init_s8_minmax_sse4_params;
s8_maxpool_config.first_pass_tile_size = 9;
s8_maxpool_config.remainder_pass_tile_size = 8;
} else {
s8_maxpool_config.ukernel = (xnn_maxpool_ukernel_fn) xnn_s8_maxpool_minmax_ukernel_9p8x__sse2_c16;
s8_maxpool_config.init.s8 = xnn_init_s8_minmax_sse2_params;
s8_maxpool_config.first_pass_tile_size = 9;
s8_maxpool_config.remainder_pass_tile_size = 8;
}
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
s8_maxpool_config.ukernel = (xnn_maxpool_ukernel_fn) xnn_s8_maxpool_minmax_ukernel_9p8x__wasmsimd_c16;
s8_maxpool_config.init.s8 = xnn_init_s8_minmax_wasmsimd_params;
s8_maxpool_config.first_pass_tile_size = 9;
s8_maxpool_config.remainder_pass_tile_size = 8;
#elif XNN_ARCH_WASM
s8_maxpool_config.ukernel = (xnn_maxpool_ukernel_fn) xnn_s8_maxpool_minmax_ukernel_9p8x__scalar_c1;
s8_maxpool_config.init.s8 = xnn_init_s8_minmax_scalar_params;
s8_maxpool_config.first_pass_tile_size = 9;
s8_maxpool_config.remainder_pass_tile_size = 8;
#elif XNN_ARCH_RISCV
s8_maxpool_config.ukernel = (xnn_maxpool_ukernel_fn) xnn_s8_maxpool_minmax_ukernel_9p8x__scalar_c1;
s8_maxpool_config.init.s8 = xnn_init_s8_minmax_scalar_params;
s8_maxpool_config.first_pass_tile_size = 9;
s8_maxpool_config.remainder_pass_tile_size = 8;
#endif
}
static void init_u8_maxpool_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
u8_maxpool_config.ukernel = (xnn_maxpool_ukernel_fn) xnn_u8_maxpool_minmax_ukernel_9p8x__neon_c16;
u8_maxpool_config.init.u8 = xnn_init_u8_minmax_neon_params;
u8_maxpool_config.first_pass_tile_size = 9;
u8_maxpool_config.remainder_pass_tile_size = 8;
} else if (!XNN_PLATFORM_MOBILE) {
u8_maxpool_config.ukernel = (xnn_maxpool_ukernel_fn) xnn_u8_maxpool_minmax_ukernel_9p8x__scalar_c1;
u8_maxpool_config.init.u8 = xnn_init_u8_minmax_scalar_params;
u8_maxpool_config.first_pass_tile_size = 9;
u8_maxpool_config.remainder_pass_tile_size = 8;
}
#elif XNN_ARCH_ARM64
u8_maxpool_config.ukernel = (xnn_maxpool_ukernel_fn) xnn_u8_maxpool_minmax_ukernel_9p8x__neon_c16;
u8_maxpool_config.init.u8 = xnn_init_u8_minmax_neon_params;
u8_maxpool_config.first_pass_tile_size = 9;
u8_maxpool_config.remainder_pass_tile_size = 8;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
u8_maxpool_config.ukernel = (xnn_maxpool_ukernel_fn) xnn_u8_maxpool_minmax_ukernel_9p8x__sse2_c16;
u8_maxpool_config.init.u8 = xnn_init_u8_minmax_sse2_params;
u8_maxpool_config.first_pass_tile_size = 9;
u8_maxpool_config.remainder_pass_tile_size = 8;
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
u8_maxpool_config.ukernel = (xnn_maxpool_ukernel_fn) xnn_u8_maxpool_minmax_ukernel_9p8x__wasmsimd_c16;
u8_maxpool_config.init.u8 = xnn_init_u8_minmax_wasmsimd_params;
u8_maxpool_config.first_pass_tile_size = 9;
u8_maxpool_config.remainder_pass_tile_size = 8;
#elif XNN_ARCH_WASM
u8_maxpool_config.ukernel = (xnn_maxpool_ukernel_fn) xnn_u8_maxpool_minmax_ukernel_9p8x__scalar_c1;
u8_maxpool_config.init.u8 = xnn_init_u8_minmax_scalar_params;
u8_maxpool_config.first_pass_tile_size = 9;
u8_maxpool_config.remainder_pass_tile_size = 8;
#elif XNN_ARCH_RISCV
u8_maxpool_config.ukernel = (xnn_maxpool_ukernel_fn) xnn_u8_maxpool_minmax_ukernel_9p8x__scalar_c1;
u8_maxpool_config.init.u8 = xnn_init_u8_minmax_scalar_params;
u8_maxpool_config.first_pass_tile_size = 9;
u8_maxpool_config.remainder_pass_tile_size = 8;
#endif
}
#if XNN_PLATFORM_WINDOWS
static BOOL CALLBACK init_f16_maxpool_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f16_maxpool_config();
return TRUE;
}
static BOOL CALLBACK init_f32_maxpool_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f32_maxpool_config();
return TRUE;
}
static BOOL CALLBACK init_s8_maxpool_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_s8_maxpool_config();
return TRUE;
}
static BOOL CALLBACK init_u8_maxpool_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_u8_maxpool_config();
return TRUE;
}
#endif
const struct xnn_maxpool_config* xnn_init_f16_maxpool_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL || !xnn_is_f16_compatible_config(hardware_config)) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f16_maxpool, &init_f16_maxpool_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f16_maxpool, &init_f16_maxpool_config);
#endif
return &f16_maxpool_config;
}
const struct xnn_maxpool_config* xnn_init_f32_maxpool_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f32_maxpool, &init_f32_maxpool_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f32_maxpool, &init_f32_maxpool_config);
#endif
return &f32_maxpool_config;
}
const struct xnn_maxpool_config* xnn_init_s8_maxpool_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_s8_maxpool, &init_s8_maxpool_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_s8_maxpool, &init_s8_maxpool_config);
#endif
return &s8_maxpool_config;
}
const struct xnn_maxpool_config* xnn_init_u8_maxpool_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_u8_maxpool, &init_u8_maxpool_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_u8_maxpool, &init_u8_maxpool_config);
#endif
return &u8_maxpool_config;
}
| 13,390 | 45.33564 | 114 |
c
|
XNNPACK
|
XNNPACK-master/src/memory-planner.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <xnnpack.h>
#include <xnnpack/allocator.h>
#include <xnnpack/memory-planner.h>
#include <xnnpack/subgraph.h>
// Check if two xnn_value's lifecycles overlap.
inline static bool value_lifecycle_overlap(const struct xnn_usage_record* a, const struct xnn_usage_record* b) {
assert(a->last_node >= a->first_node);
assert(b->last_node >= b->first_node);
if (a->first_node < b->first_node) {
return a->last_node >= b->first_node;
} else {
return b->last_node >= a->first_node;
}
}
// Use this comparison function to sort xnn_usage_record according to the
// tensor_size in decreasing order.
static inline int cmp_value_usage_tensor_size(const void* a, const void* b) {
const size_t tensor_size_a = (*(struct xnn_usage_record *const*)a)->tensor_size;
const size_t tensor_size_b = (*(struct xnn_usage_record *const*)b)->tensor_size;
return (tensor_size_b > tensor_size_a) - (tensor_size_b < tensor_size_a);
}
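// Computes the [first_node, last_node] live interval for every value touched by the
// runtime's operators. Values whose intervals do not overlap (see value_lifecycle_overlap
// above) are candidates for sharing the same arena space.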
static void populate_value_lifecycle(const struct xnn_runtime* runtime, struct xnn_usage_record* usage) {
assert(runtime != NULL);
if (runtime->num_ops == 0) {
return;
}
  // first_node/last_node in every xnn_usage_record start out as 0 (the records are zero-initialized
  // in xnn_init_value_allocation_tracker), so 0 cannot distinguish "unset" from "node 0". We therefore
  // scan from the second node and handle the first node separately at the end.
for (uint32_t nid = 1; nid < runtime->num_ops; ++nid) {
const struct xnn_operator_data* opdata = runtime->opdata + nid;
for (uint32_t i = 0; i < opdata->num_inputs; ++i) {
if (opdata->inputs[i] == XNN_INVALID_VALUE_ID) {
continue; // Optimized away.
}
if (usage[opdata->inputs[i]].first_node == 0) {
usage[opdata->inputs[i]].first_node = nid;
}
usage[opdata->inputs[i]].last_node = nid;
}
for (uint32_t i = 0; i < opdata->num_outputs; ++i) {
if (opdata->outputs[i] == XNN_INVALID_VALUE_ID) {
continue; // Optimized away.
}
if (usage[opdata->outputs[i]].first_node == 0) {
usage[opdata->outputs[i]].first_node = nid;
}
usage[opdata->outputs[i]].last_node = nid;
}
}
const struct xnn_operator_data* first_node = runtime->opdata;
for (uint32_t i = 0; i < first_node->num_inputs; ++i) {
if (first_node->inputs[i] == XNN_INVALID_VALUE_ID) {
continue; // Optimized away.
}
usage[first_node->inputs[i]].first_node = 0;
}
for (uint32_t i = 0; i < first_node->num_outputs; ++i) {
if (first_node->outputs[i] == XNN_INVALID_VALUE_ID) {
continue; // Optimized away.
}
usage[first_node->outputs[i]].first_node = 0;
}
// Separate loop over all values to make sure we have usage records properly initialized with invalid reuse_value_id.
// Some usage records are not associated with any nodes, and they will not be visited by the loops over nodes above.
for (uint32_t i = 0; i < runtime->num_values + runtime->num_ops; i++) {
usage[i].reuse_value_id = XNN_INVALID_VALUE_ID;
usage[i].alloc_offset = SIZE_MAX;
usage[i].opdata_id = XNN_INVALID_NODE_ID;
}
}
// Represent a memory block [start, end)
struct memory_block {
size_t start;
size_t end;
};
// Use this comparison function to sort memory_block according to the 'start'
// in increasing order.
static inline int cmp_memory_block(const void* a, const void* b) {
const size_t start_a = ((const struct memory_block*)a)->start;
const size_t start_b = ((const struct memory_block*)b)->start;
return (start_a > start_b) - (start_a < start_b);
}
// Given the current live memory blocks, return the offset in a memory arena for a to-be-allocated value of size
// 'to_alloc_size'.
static size_t find_value_alloc_offset(struct memory_block* live_mem_blocks,
size_t num_mem_blocks,
size_t to_alloc_size) {
if (num_mem_blocks == 0) {
return 0;
}
if (num_mem_blocks == 1) {
return live_mem_blocks[0].end;
}
// Sort memory blocks according to 'start' in increasing order.
qsort(live_mem_blocks, num_mem_blocks, sizeof(struct memory_block), cmp_memory_block);
  // Coalesce overlapping or immediately adjacent memory blocks into a list of non-overlapping blocks so that the
  // smallest suitable gap between them can be found.
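  // Illustrative example (hypothetical offsets): blocks [0,16), [8,32) and [48,64) coalesce into [0,32) and [48,64),
  // leaving a 16-byte gap at offset 32; a 16-byte request is placed at 32, while a 32-byte request falls through to 64.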
size_t num_coalesced_mem_blocks = 1;
for (size_t i = 1; i < num_mem_blocks; ++i) {
const size_t current_coalesced_end =
live_mem_blocks[num_coalesced_mem_blocks - 1].end;
if (live_mem_blocks[i].start > current_coalesced_end) {
assert(num_coalesced_mem_blocks <= i);
live_mem_blocks[num_coalesced_mem_blocks] = live_mem_blocks[i];
num_coalesced_mem_blocks++;
continue;
}
if (live_mem_blocks[i].end > current_coalesced_end) {
live_mem_blocks[num_coalesced_mem_blocks - 1].end = live_mem_blocks[i].end;
}
}
size_t smallest_gap_size = SIZE_MAX;
  // Index of the coalesced block after whose end 'to_alloc_size' bytes will be allocated (defaults to the last block).
size_t smallest_gap_index = num_coalesced_mem_blocks - 1;
for (size_t i = 0; i < num_coalesced_mem_blocks - 1; ++i) {
assert(live_mem_blocks[i + 1].start > live_mem_blocks[i].end);
const size_t gap = live_mem_blocks[i + 1].start - live_mem_blocks[i].end;
if (gap >= to_alloc_size && gap < smallest_gap_size) {
smallest_gap_index = i;
smallest_gap_size = gap;
}
}
return live_mem_blocks[smallest_gap_index].end;
}
void xnn_init_value_allocation_tracker(
struct xnn_value_allocation_tracker* tracker,
const struct xnn_runtime* runtime)
{
tracker->mem_arena_size = 0;
tracker->usage = xnn_allocate_zero_memory(sizeof(struct xnn_usage_record) * (runtime->num_values + runtime->num_ops));
#if XNN_ENABLE_MEMOPT
populate_value_lifecycle(runtime, tracker->usage);
#endif
tracker->min_value_id = XNN_INVALID_VALUE_ID;
tracker->max_value_id = XNN_INVALID_VALUE_ID;
}
void xnn_mark_tensor_as_reuse(struct xnn_value_allocation_tracker* tracker,
uint32_t value_id,
uint32_t reuse_value_id,
uint32_t new_last_node) {
// Set tensor_size to 0 so memory planner will not try to find memory for these tensors.
tracker->usage[value_id].tensor_size = 0;
tracker->usage[value_id].reuse_value_id = reuse_value_id;
// The reused tensor has an expanded live-range.
tracker->usage[reuse_value_id].last_node = new_last_node;
}
void xnn_add_value_allocation_tracker(struct xnn_value_allocation_tracker* tracker,
uint32_t value_id,
size_t tensor_size) {
tracker->usage[value_id].tensor_size = tensor_size;
if (tracker->min_value_id == XNN_INVALID_VALUE_ID) {
tracker->min_value_id = value_id;
} else {
// Note that values are expected to be added in increasing order.
assert(value_id > tracker->min_value_id);
assert(value_id > tracker->max_value_id);
}
tracker->max_value_id = value_id;
}
void xnn_add_operator_workspace_allocation_tracker(
struct xnn_value_allocation_tracker* tracker,
uint32_t operator_workspace_value_id,
size_t tensor_size,
uint32_t opdata_id)
{
tracker->usage[operator_workspace_value_id].tensor_size = tensor_size;
if (tracker->min_value_id == XNN_INVALID_VALUE_ID) {
tracker->min_value_id = operator_workspace_value_id;
} else {
// Note that values are expected to be added in increasing order.
assert(operator_workspace_value_id > tracker->min_value_id);
assert(operator_workspace_value_id > tracker->max_value_id);
}
tracker->max_value_id = operator_workspace_value_id;
tracker->usage[operator_workspace_value_id].first_node = opdata_id;
tracker->usage[operator_workspace_value_id].last_node = opdata_id;
tracker->usage[operator_workspace_value_id].opdata_id = opdata_id;
}
void xnn_plan_value_allocation_tracker(struct xnn_value_allocation_tracker* tracker) {
#if XNN_ENABLE_MEMOPT
if (tracker->min_value_id == XNN_INVALID_VALUE_ID) {
assert(tracker->max_value_id == XNN_INVALID_VALUE_ID);
return;
}
const uint32_t num_values = tracker->max_value_id - tracker->min_value_id + 1;
struct xnn_usage_record** sorted_usage = xnn_allocate_zero_memory(sizeof(struct xnn_usage_record*) * num_values);
size_t num_values_to_alloc = 0;
for (size_t i = tracker->min_value_id; i <= tracker->max_value_id; ++i) {
struct xnn_usage_record* info = tracker->usage + i;
if (info->tensor_size != 0) {
sorted_usage[num_values_to_alloc++] = info;
}
}
qsort(sorted_usage, num_values_to_alloc, sizeof(struct xnn_usage_record*), cmp_value_usage_tensor_size);
// Start the allocation planning process.
struct memory_block* current_live_mem_blocks = xnn_allocate_zero_memory(
sizeof(struct memory_block) * num_values_to_alloc);
size_t mem_arena_size = 0;
for (size_t i = 0; i < num_values_to_alloc; ++i) {
size_t num_live_mem_blocks = 0;
struct xnn_usage_record* current = sorted_usage[i];
for (size_t j = 0; j < i; ++j) {
const struct xnn_usage_record* allocated = sorted_usage[j];
if (value_lifecycle_overlap(current, allocated)) {
current_live_mem_blocks[num_live_mem_blocks++] = (struct memory_block){
.start = allocated->alloc_offset,
.end = allocated->alloc_offset + allocated->tensor_size,
};
}
}
current->alloc_offset = find_value_alloc_offset(current_live_mem_blocks, num_live_mem_blocks, current->tensor_size);
if (mem_arena_size < current->alloc_offset + current->tensor_size) {
mem_arena_size = current->alloc_offset + current->tensor_size;
}
}
// Walk through all tensors that are reusing memory, and update their usage records.
for (size_t i = tracker->min_value_id; i <= tracker->max_value_id; ++i) {
struct xnn_usage_record* usage = &tracker->usage[i];
uint32_t reuse_id = usage->reuse_value_id;
if (reuse_id == XNN_INVALID_VALUE_ID) {
continue;
}
assert(tracker->usage[reuse_id].alloc_offset != SIZE_MAX);
usage->alloc_offset = tracker->usage[reuse_id].alloc_offset;
}
tracker->mem_arena_size = mem_arena_size;
xnn_release_memory(sorted_usage);
xnn_release_memory(current_live_mem_blocks);
#else
tracker->mem_arena_size = 0;
for (uint32_t i = tracker->min_value_id; i <= tracker->max_value_id; ++i) {
if (tracker->usage[i].tensor_size > 0) {
tracker->usage[i].alloc_offset = tracker->mem_arena_size;
tracker->mem_arena_size += tracker->usage[i].tensor_size;
}
}
#endif
}
XNNPACK | XNNPACK-master/src/memory.c
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
// Include first for the platform detection macros.
#include <xnnpack/common.h>
#if XNN_PLATFORM_WEB
#include <emscripten/emscripten.h>
#endif
#if XNN_PLATFORM_WINDOWS
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
#else
// This define needs to come first because errno.h includes features.h, which would otherwise define macros that
// cause sys/mman.h not to declare mremap.
#if !defined(_GNU_SOURCE)
#define _GNU_SOURCE
#endif
#include <errno.h>
#include <sys/mman.h>
#include <unistd.h>
#endif
#include <assert.h>
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <xnnpack/log.h>
#include <xnnpack/math.h>
#include <xnnpack/memory.h>
// Helpers to allocate/mmap and release memory used by both code and weights cache.
static size_t system_page_size = 0;
static size_t get_page_size() {
if (system_page_size == 0) {
// Get page size.
#if XNN_PLATFORM_WINDOWS
SYSTEM_INFO sysinfo;
GetSystemInfo(&sysinfo);
assert(sysinfo.dwPageSize != 0);
system_page_size = (size_t) sysinfo.dwPageSize;
#else
const long result = sysconf(_SC_PAGESIZE);
if (result == -1) {
xnn_log_fatal("failed to get page size, error code: %d", errno);
}
assert(result >= 0);
system_page_size = (size_t) result;
#endif
}
assert(is_po2(system_page_size));
return system_page_size;
}
// Maps `size` bytes of memory, returns pointer to allocation, NULL if failed.
static void* allocate_buffer(size_t size) {
xnn_log_debug("allocating buffer of size %zu", size);
assert(size % get_page_size() == 0);
#if XNN_PLATFORM_WINDOWS
void* p = VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
if (p == NULL) {
xnn_log_error("failed to allocate %zu bytes for code/weights buffer, error code: %" PRIu32,
size, (uint32_t) GetLastError());
return NULL;
}
#else
#if XNN_PLATFORM_QURT
void* p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
#else
void* p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
if (p == MAP_FAILED) {
xnn_log_error("failed to allocate %zu bytes for code/weights buffer, error code: %d", size, errno);
return NULL;
}
#endif
return p;
}
// Releases memory previously mapped by `allocate_buffer`, returns xnn_status_success on success.
static enum xnn_status release_memory(void* start, size_t capacity) {
#if XNN_PLATFORM_WINDOWS
  // We only decommitted unused capacity earlier, so release the whole reservation now.
if (!VirtualFree(start, 0, MEM_RELEASE)) {
xnn_log_error("failed to release code/weights buffer, error code: %" PRIu32, (uint32_t) GetLastError());
return xnn_status_invalid_state;
}
#else
if (munmap(start, capacity) == -1) {
xnn_log_error("failed to release code/weights buffer, error code: %d", errno);
return xnn_status_invalid_state;
}
#endif
return xnn_status_success;
}
// Resize a buffer at old_pointer of size old_size to new_size. The actual new capacity of the resized buffer is
// written to new_capacity_out, which can be >= new_size due to page alignment requirements.
// Returns a pointer to a buffer which might be the same as old_pointer if the virtual memory can be remapped;
// otherwise a new buffer is allocated and the contents of the old buffer are copied over.
static void* resize_buffer(
void* old_pointer, size_t old_size, size_t old_capacity, size_t new_size, size_t* new_capacity_out)
{
const size_t new_capacity = round_up_po2(new_size, get_page_size());
#if XNN_PLATFORM_LINUX
void* new_pointer = mremap(old_pointer, old_capacity, new_capacity, MREMAP_MAYMOVE, NULL);
if (new_pointer == MAP_FAILED) {
xnn_log_error("mremap failed with errno: %d", errno);
return NULL;
}
xnn_log_debug("resize_buffer: remap, old capacity %zu to new capacity %zu", old_capacity, new_capacity);
#else
void* new_pointer = allocate_buffer(new_capacity);
if (new_pointer == NULL) {
xnn_log_error("allocate_buffer failed");
return NULL;
}
memcpy(new_pointer, old_pointer, old_size);
// Release old code_buffer.
const enum xnn_status status = release_memory(old_pointer, old_capacity);
if (status != xnn_status_success) {
xnn_log_error("releasing old buffer failed, this could be a leak of %zu bytes", old_capacity);
// Log but proceed as per normal since we successfully allocated a new memory that can be used by the caller.
}
xnn_log_debug("resize_buffer: allocate memory, old capacity %zu to new capacity %zu", old_capacity, new_capacity);
#endif
*new_capacity_out = new_capacity;
return new_pointer;
}
enum xnn_status xnn_allocate_code_memory(struct xnn_code_buffer* buffer, size_t size) {
memset(buffer, 0, sizeof(struct xnn_code_buffer));
const size_t page_aligned_size = round_up_po2(size, get_page_size());
buffer->start = allocate_buffer(page_aligned_size);
if (buffer->start == NULL) {
return xnn_status_out_of_memory;
}
buffer->size = 0;
buffer->capacity = page_aligned_size;
return xnn_status_success;
}
// Releases unused memory. Will write the new capacity to `capacity`.
static enum xnn_status release_unused_memory(size_t size, void* start, size_t* capacity) {
// Release all unused pages.
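  // Illustrative example (hypothetical numbers): size = 5000, page size = 4096, *capacity = 16384 gives
  // page_aligned_size = 8192, so the trailing 8192 bytes are decommitted (Windows) or unmapped and *capacity becomes 8192.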
const size_t page_aligned_size = round_up_po2(size, get_page_size());
const uint8_t* mem_start = (uint8_t*) start;
const uint8_t* unused_start = mem_start + page_aligned_size;
assert(*capacity >= page_aligned_size);
const size_t unused_capacity = *capacity - page_aligned_size;
xnn_log_debug("releasing memory, start %p, used: %zu, capacity: %zu, unused %zu", mem_start, size, *capacity,
unused_capacity);
if (unused_capacity != 0) {
// Free unused pages.
#if XNN_PLATFORM_WINDOWS
// We cannot selectively release pages inside the region of pages, so just decommit them.
if (!VirtualFree((void*) unused_start, unused_capacity, MEM_DECOMMIT)) {
xnn_log_error("failed to unmap code/weights buffer, error code: %" PRIu32, (uint32_t) GetLastError());
return xnn_status_invalid_state;
}
*capacity = page_aligned_size;
#elif !XNN_PLATFORM_WEB
// Web does not support partial unmapping.
if (munmap((void*) unused_start, unused_capacity) == -1) {
xnn_log_error("failed to unmap code/weights buffer, error code: %d", errno);
return xnn_status_invalid_state;
}
*capacity = page_aligned_size;
#else
if (unused_capacity == *capacity) {
if (munmap((void*) unused_start, unused_capacity) == -1) {
xnn_log_error("failed to unmap code/weights buffer, error code: %d", errno);
return xnn_status_invalid_state;
} else {
*capacity = 0;
}
}
#endif
}
return xnn_status_success;
}
enum xnn_memory_permission {
xnn_memory_permission_read_only,
xnn_memory_permission_read_execute,
};
static enum xnn_status set_memory_permission(void* start, size_t size, enum xnn_memory_permission permission) {
#if XNN_PLATFORM_WINDOWS
DWORD old = 0, prot = 0;
switch (permission) {
case xnn_memory_permission_read_only:
prot = PAGE_READONLY;
break;
case xnn_memory_permission_read_execute:
prot = PAGE_EXECUTE_READ;
break;
default:
XNN_UNREACHABLE;
}
if (!VirtualProtect(start, size, prot, &old)) {
xnn_log_error(
"failed to set memory permission (%d), error code: %" PRIu32, permission, (uint32_t) GetLastError());
return xnn_status_invalid_state;
}
#elif XNN_PLATFORM_WEB
// Memory protection not supported on Web.
return xnn_status_success;
#else
int prot = 0;
switch (permission) {
case xnn_memory_permission_read_only:
prot = PROT_READ;
break;
case xnn_memory_permission_read_execute:
prot = PROT_READ | PROT_EXEC;
break;
default:
XNN_UNREACHABLE;
}
if (mprotect(start, size, prot) == -1) {
xnn_log_error("failed to set memory permission (%d), error code: %d", permission, errno);
return xnn_status_invalid_state;
}
#endif
return xnn_status_success;
}
#if XNN_PLATFORM_WEB
EM_JS(int, xnnLoadWasmModuleJS, (const uint8_t* code, int offset, int offset_end, int invalid_function_index), {
const tableOriginalSize = wasmTable.length;
const binary = new Uint8Array(HEAPU8.slice(code + offset, code + offset_end));
try {
var module = new WebAssembly.Module(binary);
var instance = new WebAssembly.Instance(module, {env : {memory: wasmMemory}});
for (var symName in instance.exports) {
var value = instance.exports[symName];
addFunction(value);
}
if (tableOriginalSize < wasmTable.length) {
return tableOriginalSize;
}
return invalid_function_index;
}
catch(error) {
console.log(error);
return invalid_function_index;
}
});
#endif // XNN_PLATFORM_WEB
#if XNN_PLATFORM_JIT
enum xnn_status xnn_finalize_code_memory(struct xnn_code_buffer* buffer) {
#if XNN_PLATFORM_WEB
return xnn_status_success;
#else
const enum xnn_status status = release_unused_memory(buffer->size, buffer->start, &buffer->capacity);
if (status != xnn_status_success) {
return status;
}
if (buffer->capacity == 0) {
return xnn_status_success;
}
// Flush icache, do it before changing permissions due to bugs on older ARM64 kernels.
#if (XNN_ARCH_ARM || XNN_ARCH_ARM64) && XNN_PLATFORM_JIT
#if XNN_PLATFORM_WINDOWS
FlushInstructionCache(GetCurrentProcess(), buffer->start, buffer->capacity);
#else
    // The iOS toolchain doesn't support this; switch to sys_icache_invalidate once iOS is supported.
__builtin___clear_cache(buffer->start, (void*) ((uint8_t*) buffer->start + buffer->capacity));
#endif // XNN_PLATFORM_WINDOWS
  #endif  // (XNN_ARCH_ARM || XNN_ARCH_ARM64) && XNN_PLATFORM_JIT
// Set permissions to RX (no write).
return set_memory_permission(buffer->start, buffer->size, xnn_memory_permission_read_execute);
#endif // XNN_PLATFORM_WEB
}
uintptr_t xnn_first_function_in_chunk_ptr(struct xnn_code_buffer* buffer, size_t offset, size_t offset_end) {
#if (XNN_ARCH_ARM || XNN_ARCH_ARM64)
return (uintptr_t) buffer->start + offset;
#elif XNN_PLATFORM_WEB
if (offset == offset_end) {
return XNN_INVALID_FUNCTION_INDEX;
}
return xnnLoadWasmModuleJS(buffer->start, offset, offset_end, XNN_INVALID_FUNCTION_INDEX);
#endif
}
#endif // XNN_PLATFORM_JIT
enum xnn_status xnn_release_code_memory(struct xnn_code_buffer* buffer) {
if (buffer->capacity == 0) {
return xnn_status_success;
}
const enum xnn_status status = release_memory(buffer->start, buffer->capacity);
if (status != xnn_status_success) {
return status;
}
memset(buffer, 0, sizeof(struct xnn_code_buffer));
return xnn_status_success;
}
enum xnn_status xnn_reserve_code_memory(struct xnn_code_buffer* buffer, size_t min_available_size) {
if (buffer->size + min_available_size <= buffer->capacity) {
return xnn_status_success;
}
xnn_log_debug("reserving code memory of size %zu", min_available_size);
size_t new_capacity = 0;
void* new_start =
resize_buffer(buffer->start, buffer->size, buffer->capacity, buffer->size + min_available_size, &new_capacity);
if (new_start == NULL) {
xnn_log_error("failed to reserve code memory");
return xnn_status_out_of_memory;
}
buffer->start = new_start;
buffer->capacity = new_capacity;
return xnn_status_success;
}
enum xnn_status xnn_allocate_weights_memory(struct xnn_weights_buffer* buffer, size_t size) {
memset(buffer, 0, sizeof(struct xnn_weights_buffer));
const size_t page_aligned_size = round_up_po2(size, get_page_size());
buffer->start = allocate_buffer(page_aligned_size);
if (buffer->start == NULL) {
return xnn_status_out_of_memory;
}
buffer->size = 0;
buffer->capacity = page_aligned_size;
return xnn_status_success;
}
enum xnn_status xnn_release_weights_memory(struct xnn_weights_buffer* buffer) {
if (buffer->capacity == 0) {
return xnn_status_success;
}
const enum xnn_status status = release_memory(buffer->start, buffer->capacity);
if (status != xnn_status_success) {
return status;
}
memset(buffer, 0, sizeof(struct xnn_code_buffer));
return xnn_status_success;
}
enum xnn_status xnn_reserve_weights_memory(struct xnn_weights_buffer* buffer, size_t min_available_size) {
if (buffer->size + min_available_size <= buffer->capacity) {
xnn_log_debug("reserving weights memory of size %zu without growing buffer", min_available_size);
return xnn_status_success;
}
size_t new_capacity = 0;
void* new_start =
resize_buffer(buffer->start, buffer->size, buffer->capacity, buffer->size + min_available_size, &new_capacity);
if (new_start == NULL) {
xnn_log_error("failed to reserve weights memory");
return xnn_status_out_of_memory;
}
buffer->start = new_start;
buffer->capacity = new_capacity;
return xnn_status_success;
}
enum xnn_status xnn_finalize_weights_memory(struct xnn_weights_buffer* buffer) {
const enum xnn_status status = release_unused_memory(buffer->size, buffer->start, &buffer->capacity);
if (status != xnn_status_success) {
return status;
}
if (buffer->capacity == 0) {
return xnn_status_success;
}
return set_memory_permission(buffer->start, buffer->size, xnn_memory_permission_read_only);
}
XNNPACK | XNNPACK-master/src/microkernel-utils.c
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/microkernel-utils.h>
static size_t dwconv_num_middle_pass(
size_t kernel_size,
size_t first_pass_tile,
size_t middle_pass_tile,
size_t last_pass_tile)
{
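  // E.g. (illustrative) kernel_size = 9, first_pass_tile = 2, middle_pass_tile = 3, last_pass_tile = 3:
  // doz(9, 5) = 4 taps are left for middle passes, so divide_round_up(4, 3) = 2 middle passes are needed.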
return divide_round_up(doz(kernel_size, first_pass_tile + last_pass_tile), middle_pass_tile);
}
size_t xnn_dwconv_multipass_tile_size(
size_t kernel_size,
size_t first_pass_tile,
size_t middle_pass_tile,
size_t last_pass_tile)
{
assert(kernel_size > first_pass_tile);
  // We always have a first and a last pass, and run as many middle passes as possible.
  // E.g. kernel_size == 9, first_pass_tile == 2, middle_pass_tile == 3, last_pass_tile == 3:
  // the first pass covers 2 taps (7 left), the last pass is reserved 3 taps (4 left), and 2 middle passes cover the
  // remaining 4 taps rounded up to 6, so the tile size is 2 + 3 + 6 = 11.
return (first_pass_tile + last_pass_tile +
round_up(doz(kernel_size, first_pass_tile + last_pass_tile), middle_pass_tile));
}
size_t xnn_dwconv_multipass_weights_size(
size_t tile_size,
size_t channels,
size_t channel_tile,
size_t channel_subtile,
size_t channel_round,
size_t bias_element_size,
size_t log2_filter_element_size,
size_t extra_weights_byte)
{
  // The first and middle passes run as many channel_tile-sized loops as possible, and can over-read up to channel_round.
const size_t subtiled_channels = round_up_po2(channels, channel_round);
// Always have a first and last pass.
size_t c_stride = (round_down_po2(subtiled_channels, channel_tile) +
// handle the remainder in channel_subtile loops.
round_up_po2(mod_po2(subtiled_channels, channel_tile), channel_subtile));
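  // Illustrative example (hypothetical values): channels = 10, channel_round = 2 -> subtiled_channels = 10; with
  // channel_tile = 8 and channel_subtile = 4, c_stride = 8 + round_up_po2(2, 4) = 12 packed channels.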
return ((tile_size << log2_filter_element_size) + bias_element_size + extra_weights_byte) * c_stride;
}
size_t xnn_dwconv_multipass_bytes_read(
size_t kernel_size,
size_t first_pass_tile,
size_t middle_pass_tile,
size_t last_pass_tile,
size_t channels,
size_t channel_tile,
size_t channel_subtile,
size_t channel_round,
size_t log2_input_size,
size_t log2_filter_size,
size_t bias_element_size,
size_t log2_accumulator_size)
{
const size_t num_middle_pass = dwconv_num_middle_pass(kernel_size, first_pass_tile, middle_pass_tile, last_pass_tile);
const size_t tile_size = first_pass_tile + num_middle_pass * middle_pass_tile + last_pass_tile;
const size_t rounded_channels = round_up_po2(channels, channel_round);
const size_t input_elements_read = tile_size * rounded_channels;
const size_t weight_elements_read = tile_size * rounded_channels;
const size_t bias_elements_read = rounded_channels;
  // The middle passes read num_middle_pass * rounded_channels buffer elements, and the last pass reads another
  // rounded_channels, i.e. (num_middle_pass + 1) * rounded_channels elements in total.
const size_t buffer_elements_read = (num_middle_pass + 1) * rounded_channels;
return (input_elements_read << log2_input_size) + (weight_elements_read << log2_filter_size) +
(bias_elements_read * bias_element_size) + (buffer_elements_read << log2_accumulator_size);
}
size_t xnn_dwconv_multipass_bytes_written(
size_t kernel_size,
size_t first_pass_tile,
size_t middle_pass_tile,
size_t last_pass_tile,
size_t channels,
size_t channel_round,
size_t log2_accumulator_size,
size_t log2_output_size)
{
// First pass writes rounded_channels elements to buffer, middle pass writes num_middle_pass * rounded_channels
// elements to buffer. Last pass writes channels elements to output.
// This is equivalent to (1 + num_middle_pass) * rounded_channels + channels elements.
const size_t num_middle_pass = dwconv_num_middle_pass(kernel_size, first_pass_tile, middle_pass_tile, last_pass_tile);
const size_t rounded_channels = round_up_po2(channels, channel_round);
const size_t buffer_elements_written = (1 + num_middle_pass) * rounded_channels;
const size_t output_elements_written = channels;
return (buffer_elements_written << log2_accumulator_size) + (output_elements_written << log2_output_size);
}
XNNPACK | XNNPACK-master/src/mutex.c
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <string.h>
#include <xnnpack.h>
#include <xnnpack/common.h>
#include <xnnpack/log.h>
#include <xnnpack/mutex.h>
#if XNN_PLATFORM_WINDOWS
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
#elif XNN_PLATFORM_MACOS || XNN_PLATFORM_IOS
#include <dispatch/dispatch.h>
#else
#include <pthread.h>
#endif
enum xnn_status xnn_mutex_init(struct xnn_mutex* mutex) {
#if XNN_PLATFORM_WINDOWS
mutex->handle = CreateMutexW(
/* security attributes */ NULL,
/* initially owned */ FALSE,
/* name */ NULL);
if (mutex->handle == NULL) {
xnn_log_error("failed to initialize mutex, error code: %" PRIu32, (uint32_t) GetLastError());
return xnn_status_out_of_memory;
}
#elif XNN_PLATFORM_MACOS || XNN_PLATFORM_IOS
mutex->semaphore = dispatch_semaphore_create(1);
if (mutex->semaphore == NULL) {
xnn_log_error("failed to initialize mutex");
return xnn_status_out_of_memory;
}
#elif !XNN_PLATFORM_WEB || defined(__EMSCRIPTEN_PTHREADS__)
const int ret = pthread_mutex_init(&mutex->mutex, NULL);
if (ret != 0) {
xnn_log_error("failed to initialize mutex, error code: %d", ret);
return xnn_status_out_of_memory;
}
#endif
return xnn_status_success;
}
enum xnn_status xnn_mutex_lock(struct xnn_mutex* mutex) {
#if XNN_PLATFORM_WINDOWS
const DWORD wait_result = WaitForSingleObject(mutex->handle, INFINITE);
if (WAIT_OBJECT_0 != wait_result) {
xnn_log_error("failed to lock mutex, error code: %" PRIu32, (uint32_t) wait_result);
return xnn_status_invalid_state;
}
#elif XNN_PLATFORM_MACOS || XNN_PLATFORM_IOS
const int wait_result = dispatch_semaphore_wait(mutex->semaphore, DISPATCH_TIME_FOREVER);
if (0 != wait_result) {
xnn_log_error("failed to lock mutex, error code: %d", wait_result);
return xnn_status_invalid_state;
}
#elif !XNN_PLATFORM_WEB || defined(__EMSCRIPTEN_PTHREADS__)
const int ret = pthread_mutex_lock(&mutex->mutex);
if (ret != 0) {
xnn_log_error("failed to lock mutex, error code: %d", ret);
return xnn_status_invalid_state;
}
#endif
return xnn_status_success;
}
enum xnn_status xnn_mutex_unlock(struct xnn_mutex* mutex) {
#if XNN_PLATFORM_WINDOWS
if (ReleaseMutex(mutex->handle) == 0) {
xnn_log_error("failed to unlock mutex, error code: %" PRIu32, (uint32_t) GetLastError());
return xnn_status_invalid_state;
}
#elif XNN_PLATFORM_MACOS || XNN_PLATFORM_IOS
dispatch_semaphore_signal(mutex->semaphore);
#elif !XNN_PLATFORM_WEB || defined(__EMSCRIPTEN_PTHREADS__)
const int ret = pthread_mutex_unlock(&mutex->mutex);
if (ret != 0) {
xnn_log_error("failed to unlock mutex, error code: %d", ret);
return xnn_status_invalid_state;
}
#endif
return xnn_status_success;
}
enum xnn_status xnn_mutex_destroy(struct xnn_mutex* mutex) {
#if XNN_PLATFORM_WINDOWS
if (CloseHandle(mutex->handle) == 0) {
xnn_log_error("failed to destroy mutex, error code: %" PRIu32, (uint32_t) GetLastError());
return xnn_status_invalid_state;
}
#elif XNN_PLATFORM_MACOS || XNN_PLATFORM_IOS
dispatch_release(mutex->semaphore);
#elif !XNN_PLATFORM_WEB || defined(__EMSCRIPTEN_PTHREADS__)
const int ret = pthread_mutex_destroy(&mutex->mutex);
if (ret != 0) {
xnn_log_error("failed to destroy mutex, error code: %d", ret);
return xnn_status_invalid_state;
}
#endif
memset(mutex, 0, sizeof(struct xnn_mutex));
return xnn_status_success;
}
XNNPACK | XNNPACK-master/src/normalization.c
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <xnnpack.h>
#include <xnnpack/math.h>
void xnn_normalize_slice(
const size_t num_dims,
const size_t offsets[XNN_MIN_ELEMENTS(1)],
const size_t sizes[XNN_MIN_ELEMENTS(1)],
const size_t input_shape[XNN_MIN_ELEMENTS(1)],
size_t normalized_offsets[XNN_MIN_ELEMENTS(XNN_MAX_TENSOR_DIMS)],
size_t normalized_input_shape[XNN_MIN_ELEMENTS(XNN_MAX_TENSOR_DIMS)],
size_t normalized_output_shape[XNN_MIN_ELEMENTS(XNN_MAX_TENSOR_DIMS)],
size_t* num_normalized_dims)
{
*num_normalized_dims = num_dims;
for (size_t i = 0; i < XNN_MAX_TENSOR_DIMS; i++) {
normalized_offsets[i] = 0;
normalized_input_shape[i] = 1;
normalized_output_shape[i] = 1;
}
// First normalization pass will remove all slices of size 1, by merging it to an adjacent inner dimension.
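  // Illustrative example (hypothetical shapes): slicing a {4, 1, 6} tensor with sizes {2, 1, 3}, the size-1 middle
  // slice is folded into the innermost dimension, giving normalized input shape {4, 6} and output shape {2, 3}.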
size_t num_size_one = 0;
for (size_t i = 0; i < num_dims; i++) {
const size_t offset = offsets[num_dims - 1 - i];
const size_t size = sizes[num_dims - 1 - i];
const size_t input_dim = input_shape[num_dims - 1 - i];
// If the innermost dimension is size 1, we can't merge it anywhere, so skip it.
if (size == 1 && i != 0) {
normalized_offsets[XNN_MAX_TENSOR_DIMS - 1 - i + 1 + num_size_one] +=
offset * normalized_input_shape[XNN_MAX_TENSOR_DIMS - 1 - i + 1 + num_size_one];
normalized_input_shape[XNN_MAX_TENSOR_DIMS - 1 - i + 1 + num_size_one] *= input_dim;
normalized_output_shape[XNN_MAX_TENSOR_DIMS - 1 - i + 1 + num_size_one] *= size;
num_size_one++;
} else {
normalized_offsets[XNN_MAX_TENSOR_DIMS - 1 - i + num_size_one] = offset;
normalized_input_shape[XNN_MAX_TENSOR_DIMS - 1 - i + num_size_one] = input_dim;
normalized_output_shape[XNN_MAX_TENSOR_DIMS - 1 - i + num_size_one] = size;
}
}
size_t new_num_dims = num_dims - num_size_one;
size_t output_dims = new_num_dims;
bool merge_previous_dim = false;
size_t num_sliced_dims = 0;
for (size_t i = 0; i < new_num_dims; i++) {
const size_t offset = normalized_offsets[XNN_MAX_TENSOR_DIMS - 1 - i];
const size_t size = normalized_output_shape[XNN_MAX_TENSOR_DIMS - 1 - i];
const size_t input_dim = normalized_input_shape[XNN_MAX_TENSOR_DIMS - 1 - i];
    const bool merge_current_dim = (offset == 0 && size == input_dim);
if (merge_previous_dim) {
normalized_offsets[XNN_MAX_TENSOR_DIMS - 1 - num_sliced_dims] =
offset * normalized_input_shape[XNN_MAX_TENSOR_DIMS - 1 - num_sliced_dims];
normalized_input_shape[XNN_MAX_TENSOR_DIMS - 1 - num_sliced_dims] *= input_dim;
normalized_output_shape[XNN_MAX_TENSOR_DIMS - 1 - num_sliced_dims] *= size;
output_dims -= 1;
if (!merge_current_dim) {
num_sliced_dims += 1;
}
} else {
normalized_offsets[XNN_MAX_TENSOR_DIMS - 1 - num_sliced_dims] = offset;
normalized_input_shape[XNN_MAX_TENSOR_DIMS - 1 - num_sliced_dims] = input_dim;
normalized_output_shape[XNN_MAX_TENSOR_DIMS - 1 - num_sliced_dims] = size;
if (!merge_current_dim) {
// If merge_current_dim, we can merge current dimension with the next dim, so don't advance num_sliced_dims.
num_sliced_dims += 1;
}
}
merge_previous_dim = merge_current_dim;
}
// new_num_dims <= num_dims due to merge of size == 1, so we are left with some extra values at the front of the
// normalized values, set them to default values.
for (size_t i = 0; i < XNN_MAX_TENSOR_DIMS - output_dims; i++) {
normalized_offsets[i] = 0;
normalized_input_shape[i] = 1;
normalized_output_shape[i] = 1;
}
*num_normalized_dims = output_dims;
}
// Returns true if dimension 'input_dim' can be folded into the preceding dimension: strides are either NULL or
// contiguous, i.e. stride[dim - 1] == stride[dim] * shape[dim] in both the input and the output layout.
static bool can_dimension_be_removed(
const size_t* input_stride,
const size_t* output_stride,
const size_t* shape,
const size_t* inverted_perm,
size_t input_dim,
const size_t num_dims) {
const size_t output_dim = inverted_perm[input_dim];
if (input_dim == 0 && output_dim == 0) {
return true;
}
if (input_stride != NULL && input_dim > 0) {
if (input_stride[input_dim - 1] != input_stride[input_dim] * shape[input_dim]) {
return false;
}
}
if (output_stride != NULL && output_dim > 0) {
if (output_stride[output_dim - 1] != output_stride[output_dim] * shape[input_dim]) {
return false;
}
}
return true;
}
// Remove dimension perm[dim] from shape, perm, input & output strides.
static void fold_into_previous_dim(
size_t* shape,
size_t* perm,
size_t* inverted_perm,
size_t* input_stride,
size_t* output_stride,
size_t num_dims,
size_t input_dim)
{
const size_t perm_idx = inverted_perm[input_dim];
// Update preceding dimension size.
if (input_dim > 0) {
shape[input_dim - 1] *= shape[input_dim];
}
// Shift shape to the left to overwrite the squashed dim.
for (size_t j = input_dim; j + 1 < num_dims; ++j) {
shape[j] = shape[j + 1];
}
// Shift strides to the left to overwrite the squashed dim.
if (input_stride != NULL) {
for (size_t j = max(1, input_dim) - 1; j + 1 < num_dims; ++j) {
input_stride[j] = input_stride[j + 1];
}
}
if (output_stride != NULL) {
for (size_t j = max(1, perm_idx) - 1; j + 1 < num_dims; ++j) {
output_stride[j] = output_stride[j + 1];
}
}
// Update dimensions that were greater than the one removed.
for (size_t j = 0; j < num_dims; ++j) {
if (perm[j] > input_dim) {
perm[j] -= 1;
}
}
// Shift permutation.
for (size_t j = perm_idx; j + 1 < num_dims; ++j) {
perm[j] = perm[j + 1];
}
// Update the inverted perm.
for (size_t j = 0; j + 1 < num_dims; ++j) {
inverted_perm[perm[j]] = j;
}
}
void xnn_normalize_transpose_permutation(
const size_t num_dims,
const size_t element_size,
const size_t* perm,
const size_t* shape,
const size_t* input_stride,
const size_t* output_stride,
size_t* normalized_num_dims,
size_t* normalized_element_size_out,
size_t* normalized_perm,
size_t* normalized_shape,
size_t* normalized_input_stride,
size_t* normalized_output_stride)
{
size_t output_dims = num_dims;
memcpy(normalized_perm, perm, num_dims * sizeof(size_t));
memcpy(normalized_shape, shape, num_dims * sizeof(size_t));
size_t* normalized_input_stride_ptr = NULL;
size_t* normalized_output_stride_ptr = NULL;
if (input_stride != NULL) {
memcpy(normalized_input_stride, input_stride, num_dims * sizeof(size_t));
normalized_input_stride_ptr = normalized_input_stride;
}
if (output_stride != NULL) {
memcpy(normalized_output_stride, output_stride, num_dims * sizeof(size_t));
normalized_output_stride_ptr = normalized_output_stride;
}
size_t normalized_inverted_perm[XNN_MAX_TENSOR_DIMS];
for (size_t i = 0; i < num_dims; ++i) {
normalized_inverted_perm[perm[i]] = i;
}
size_t input_dim = 0;
// Remove dimensions of size 1 and fold dimensions which are adjacent in both input and output tensors.
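  // Illustrative example (hypothetical shapes): transposing a {2, 3, 4} tensor with perm {2, 0, 1}, input dims 0 and 1
  // are adjacent in both layouts and fold into one dim of size 6, leaving shape {6, 4} with perm {1, 0}.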
while (input_dim < output_dims) {
const bool has_size_1 = normalized_shape[input_dim] == 1;
const bool previous_dim_in_output_is_previous_dim_in_input = input_dim > 0 &&
normalized_inverted_perm[input_dim] == normalized_inverted_perm[input_dim-1] + 1;
const bool strides_allow_fold_left = can_dimension_be_removed(
normalized_input_stride_ptr, normalized_output_stride_ptr, normalized_shape,
normalized_inverted_perm, input_dim, output_dims);
if (strides_allow_fold_left && (has_size_1 || previous_dim_in_output_is_previous_dim_in_input)) {
fold_into_previous_dim(normalized_shape, normalized_perm, normalized_inverted_perm,
normalized_input_stride_ptr, normalized_output_stride_ptr,
output_dims, input_dim);
output_dims -= 1;
// When a dimension has been removed, new folds may be possible so check
// it again.
if (input_dim > 0) {
input_dim -= 1;
}
} else {
input_dim += 1;
}
}
// All dimensions are size 1.
if (input_dim == 0) {
*normalized_num_dims = 1;
*normalized_element_size_out = element_size;
normalized_perm[0] = 0;
normalized_shape[0] = 1;
normalized_input_stride[0] = element_size;
normalized_output_stride[0] = element_size;
return;
}
  // If the last input and output dimensions are the same, treat it as one large
  // element.
size_t normalized_element_size = element_size;
if (normalized_perm[output_dims - 1] == output_dims - 1) {
const size_t last_dim = output_dims - 1;
normalized_element_size = element_size * normalized_shape[last_dim];
if (output_dims > 1 && can_dimension_be_removed(normalized_input_stride_ptr,
normalized_output_stride_ptr, normalized_shape,
normalized_inverted_perm, last_dim, output_dims)) {
output_dims -= 1;
} else {
if (normalized_input_stride != NULL) {
normalized_input_stride[last_dim] *= normalized_shape[last_dim];
}
if (normalized_output_stride != NULL) {
normalized_output_stride[normalized_perm[last_dim]] *= normalized_shape[last_dim];
}
normalized_shape[last_dim] = 1;
}
}
// If input_strides is not provided, calculate it using normalized_shape and normalized_element_size.
if (input_stride == NULL) {
normalized_input_stride[output_dims - 1] = normalized_element_size;
for (size_t i = output_dims - 1; i > 0; --i) {
normalized_input_stride[i - 1] = normalized_input_stride[i] * normalized_shape[i];
}
} else {
// Scale input_stride by element size.
for (size_t i = 0; i < output_dims; ++i) {
normalized_input_stride[i] *= element_size;
}
}
// If output_strides is not provided, calculate it using normalized_shape and normalized_element_size.
if (output_stride == NULL) {
normalized_output_stride[output_dims - 1] = normalized_element_size;
for (size_t i = output_dims - 1; i > 0; --i) {
normalized_output_stride[i - 1] = normalized_output_stride[i] * normalized_shape[normalized_perm[i]];
}
} else {
// Scale output_stride by element size.
for (size_t i = 0; i < output_dims; ++i) {
normalized_output_stride[i] *= element_size;
}
}
*normalized_element_size_out = normalized_element_size;
*normalized_num_dims = output_dims;
}
static int cmp_value_size_t(const void* a_ptr, const void* b_ptr) {
const size_t a = *((const size_t*) a_ptr);
const size_t b = *((const size_t*) b_ptr);
return (b < a) - (b > a);
}
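// Illustrative example (hypothetical shapes): input_dims {2, 3, 4, 5} with reduction_axes {1, 2} normalizes to
// input_dims {2, 12, 5} with reduction_axes {1}, because the two adjacent reduction axes are merged into one.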
void xnn_normalize_reduction(
size_t* num_reduction_axes_ptr,
size_t* reduction_axes,
size_t* num_input_dims_ptr,
size_t* input_dims)
{
size_t num_reduction_axes = *num_reduction_axes_ptr;
qsort(reduction_axes, num_reduction_axes, sizeof(size_t), cmp_value_size_t);
// The original number of input dimensions.
const size_t num_input_dims = *num_input_dims_ptr;
// Running variables for tracking sequences of adjacent axes, e.g. 1, 2, 3
size_t axes_sequence_start = SIZE_MAX;
size_t axes_sequence_length = 0;
// Running product of input dimensions for a sequence of adjacent axes, e.g. input_dims[1] * input_dims[2] * ...
size_t num_reduction_elements = 0;
// Tracking variables for consumed and produced input dimensions.
// Each consumed/produced input dimension is read/written only once.
  // Invariant num_produced_input_dims <= num_consumed_input_dims holds at each iteration.
size_t num_consumed_input_dims = 0;
size_t num_produced_input_dims = 0;
// Tracking variables for consumed and produced reduction axes.
// Each consumed/produced reduction axis is read/written only once.
  // Invariant &reduction_axes[num_produced_reduction_axes] <= consumed_reduction_axes holds at each iteration.
const size_t* consumed_reduction_axes = reduction_axes;
size_t num_produced_reduction_axes = 0;
for (; num_reduction_axes != 0; num_reduction_axes -= 1) {
const size_t axis = *consumed_reduction_axes++;
if (axis == axes_sequence_start + axes_sequence_length) {
// Continue a sequence of adjacent reduction axes.
axes_sequence_length += 1;
assert(axis == num_consumed_input_dims);
num_reduction_elements *= input_dims[num_consumed_input_dims++];
assert(num_consumed_input_dims <= num_input_dims);
} else {
if (axes_sequence_length != 0) {
// Write out merged input dimensions of the last axes sequence.
input_dims[num_produced_input_dims++] = num_reduction_elements;
}
// Start tracking a new sequence of adjacent reduction axes.
axes_sequence_start = axis;
axes_sequence_length = 1;
assert(num_consumed_input_dims <= axis);
if (num_consumed_input_dims != axis) {
// Merge input dimensions in the [num_consumed_input_dims:axis] range.
size_t normalized_dim = input_dims[num_consumed_input_dims++];
while (num_consumed_input_dims != axis) {
normalized_dim *= input_dims[num_consumed_input_dims++];
}
input_dims[num_produced_input_dims++] = normalized_dim;
assert(num_produced_input_dims <= num_consumed_input_dims);
}
assert(num_consumed_input_dims == axis);
// Adjust and write out the reduction axis.
const size_t num_eliminated_input_dims = num_consumed_input_dims - num_produced_input_dims;
reduction_axes[num_produced_reduction_axes++] = axis - num_eliminated_input_dims;
// Reinitialize the running product of input dimensions.
num_reduction_elements = input_dims[num_consumed_input_dims++];
assert(num_consumed_input_dims <= num_input_dims);
}
}
// If we're tracking a sequence of adjacent reduction axes, terminate it.
if (num_consumed_input_dims == axes_sequence_start + axes_sequence_length) {
input_dims[num_produced_input_dims++] = num_reduction_elements;
}
assert(num_produced_input_dims <= num_consumed_input_dims);
assert(num_consumed_input_dims <= num_input_dims);
  // If there are input dims after the last reduction axis, normalize them.
if (num_consumed_input_dims != num_input_dims) {
// Merge input dimensions in the [num_consumed_input_dims:num_input_dims] range.
size_t normalized_dim = input_dims[num_consumed_input_dims++];
while (num_consumed_input_dims != num_input_dims) {
normalized_dim *= input_dims[num_consumed_input_dims++];
}
input_dims[num_produced_input_dims++] = normalized_dim;
assert(num_produced_input_dims <= num_consumed_input_dims);
}
assert(num_produced_input_dims <= num_consumed_input_dims);
assert(num_consumed_input_dims == num_input_dims);
*num_input_dims_ptr = num_produced_input_dims;
*num_reduction_axes_ptr = num_produced_reduction_axes;
}
XNNPACK | XNNPACK-master/src/operator-delete.c
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <stdlib.h>
#include <xnnpack.h>
#include <xnnpack/allocator.h>
#include <xnnpack/log.h>
#include <xnnpack/operator.h>
#include <xnnpack/params.h>
enum xnn_status xnn_delete_operator(xnn_operator_t op)
{
if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
xnn_log_error("failed to delete operator: XNNPACK is not initialized");
return xnn_status_uninitialized;
}
if (op == NULL) {
return xnn_status_invalid_parameter;
}
xnn_release_memory(op->indirection_buffer);
if (op->weights_cache == NULL) {
xnn_release_simd_memory(op->packed_weights.pointer);
}
if (op->num_post_operation_params != 0) {
xnn_release_memory(op->post_operation_params);
}
xnn_release_simd_memory(op->zero_buffer);
xnn_release_memory(op->pixelwise_buffer);
xnn_release_memory(op->subconvolution_buffer);
xnn_release_simd_memory(op->lookup_table);
xnn_release_simd_memory(op);
return xnn_status_success;
}
XNNPACK | XNNPACK-master/src/operator-utils.c
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>  // For memset.
#include <xnnpack.h> // For xnn_operator_t.
#include <xnnpack/common.h> // For XNN_ALLOCATION_ALIGNMENT.
#include <xnnpack/cache.h> // For xnn_code_cache.
#include <xnnpack/log.h>
#include <xnnpack/math.h>
#include <xnnpack/operator.h> // For xnn_operator definition.
#include <xnnpack/operator-utils.h>
#if XNN_PLATFORM_JIT
// Generate code for a single set of parameters.
// Code is generated into the code cache, and the offset of the generated code is returned.
// If code already exists in code cache, the offset of the existing code is returned.
// Stores the value XNN_CACHE_NOT_FOUND in `offset` field when no code is generated.
static enum xnn_status get_generated_gemm(
xnn_jit_gemm_code_generator_fn generator,
const struct jit_gemm_params *jit_gemm_params,
size_t mr,
size_t group_output_channels,
size_t nr,
size_t group_input_channels_in_bytes,
struct xnn_code_cache* code_cache,
struct xnn_generated_code_chunk* code_chunk)
{
assert(code_cache != NULL);
size_t offset = XNN_CACHE_NOT_FOUND;
enum xnn_status status = xnn_status_success;
if (generator == NULL) {
status = xnn_status_uninitialized;
goto error;
}
status = xnn_reserve_code_memory(&code_cache->cache.code, XNN_DEFAULT_MICROKERNEL_SIZE);
if (xnn_status_success != status) {
xnn_log_error("failed to ensure sufficient space in the code buffer for a microkernel");
goto error;
}
const size_t old_size = code_cache->cache.code.size;
void* old_code = (uint8_t*) code_cache->cache.code.start + old_size;
status = generator(&code_cache->cache.code, mr, group_output_channels % nr,
group_input_channels_in_bytes, jit_gemm_params);
if (xnn_status_success != status) {
xnn_log_error("failed to generate GEMM microkernel");
goto error;
}
const size_t new_size = code_cache->cache.code.size;
const size_t code_size = new_size - old_size;
offset = xnn_get_or_insert_code_cache(code_cache, old_code, code_size);
*code_chunk = (struct xnn_generated_code_chunk) {offset, offset + code_size};
return xnn_status_success;
error:
*code_chunk = (struct xnn_generated_code_chunk) {offset, offset};
return status;
}
void xnn_generate_gemms_up_to_max_mr(
size_t max_mr,
struct gemm_codegens generators,
const struct jit_gemm_params *jit_gemm_params,
size_t group_output_channels,
size_t nr,
size_t group_input_channels_in_bytes,
xnn_operator_t op)
{
assert(XNN_MAX_MR >= max_mr);
if (op->code_cache == NULL || !xnn_code_cache_valid(op->code_cache)) {
return;
}
for (size_t mr = 1; mr <= max_mr; mr++) {
    // Find the smallest MR >= mr for which a generator is available.
size_t smallest_mr = mr;
while (generators.gemm[smallest_mr - 1].function[XNN_UARCH_DEFAULT] == NULL && smallest_mr < max_mr) {
smallest_mr++;
}
for (size_t i = 0; i < XNN_MAX_UARCH_TYPES; i++) {
xnn_log_debug("using generator for mr %zu to generate gemm of mr %zu and uarch %zu", smallest_mr, mr, i);
get_generated_gemm(generators.gemm[smallest_mr - 1].function[i],
jit_gemm_params, mr, group_output_channels, nr, group_input_channels_in_bytes, op->code_cache,
&op->ukernel.gemm.gemm_cases[mr - 1].generated_code_chunk[i]);
}
}
}
static enum xnn_status get_generated_igemm(
xnn_jit_igemm_code_generator_fn generator,
const struct jit_gemm_params *jit_gemm_params,
size_t group_output_channels,
size_t nr,
size_t group_input_channels_in_bytes,
size_t kernel_size,
size_t mr,
struct xnn_code_cache* code_cache,
struct xnn_generated_code_chunk* code_chunk)
{
size_t offset = XNN_CACHE_NOT_FOUND;
enum xnn_status status = xnn_status_success;
if (generator == NULL) {
status = xnn_status_uninitialized;
goto error;
}
status = xnn_reserve_code_memory(&code_cache->cache.code, XNN_DEFAULT_MICROKERNEL_SIZE);
if (xnn_status_success != status) {
xnn_log_error("failed to ensure sufficient space in code buffer for microkernel");
goto error;
}
const size_t old_size = code_cache->cache.code.size;
void* old_code = (uint8_t*) code_cache->cache.code.start + old_size;
status = generator(&code_cache->cache.code, mr, group_output_channels % nr,
group_input_channels_in_bytes,
kernel_size * mr * sizeof(void*), jit_gemm_params);
if (status != xnn_status_success) {
xnn_log_error("failed to generate IGEMM microkernel");
goto error;
}
const size_t new_size = code_cache->cache.code.size;
const size_t code_size = new_size - old_size;
offset = xnn_get_or_insert_code_cache(code_cache, old_code, code_size);
*code_chunk = (struct xnn_generated_code_chunk) {offset, offset + code_size};
return xnn_status_success;
error:
*code_chunk = (struct xnn_generated_code_chunk) {offset, offset};
return status;
}
void xnn_generate_igemms_up_to_max_mr(
size_t max_mr,
struct gemm_codegens generators,
const struct jit_gemm_params *jit_gemm_params,
size_t group_output_channels,
size_t nr,
size_t group_input_channels_in_bytes,
size_t kernel_size,
xnn_operator_t op)
{
assert(XNN_MAX_MR >= max_mr);
if (op->code_cache == NULL || !xnn_code_cache_valid(op->code_cache)) {
return;
}
for (size_t mr = 1; mr <= max_mr; mr++) {
    // Find the smallest MR >= mr for which a generator is available.
size_t smallest_mr = mr;
while (generators.igemm[smallest_mr - 1].function[XNN_UARCH_DEFAULT] == NULL && smallest_mr < max_mr) {
smallest_mr++;
}
for (size_t i = 0; i < XNN_MAX_UARCH_TYPES; i++) {
xnn_log_debug("using generator for mr %zu to generate igemm of mr %zu and uarch %zu", smallest_mr, mr, i);
get_generated_igemm(generators.igemm[smallest_mr - 1].function[i], jit_gemm_params,
group_output_channels, nr, group_input_channels_in_bytes, kernel_size, mr,
op->code_cache, &op->ukernel.igemm.igemm_cases[mr - 1].generated_code_chunk[i]);
}
}
}
static inline uintptr_t cached_code_at_offset(xnn_operator_t op, size_t offset)
{
return (uintptr_t)op->code_cache->cache.code.start + offset;
}
void xnn_overwrite_gemm_cases_with_generated_code(
xnn_operator_t op,
struct xnn_hmp_gemm_ukernel *gemm_cases,
size_t mr)
{
if (op->code_cache == NULL) {
return;
}
for (size_t i = 0; i < XNN_MAX_UARCH_TYPES; i++) {
const struct xnn_generated_code_chunk chunk = gemm_cases[mr - 1].generated_code_chunk[i];
if (chunk.offset != XNN_CACHE_NOT_FOUND) {
const uintptr_t gemm_kernel = xnn_first_function_in_chunk_ptr(&op->code_cache->cache.code, chunk.offset, chunk.offset_end);
if (gemm_kernel == (uintptr_t) XNN_INVALID_FUNCTION_INDEX) {
xnn_log_warning("failed to finalize gemm kernel code");
continue;
}
gemm_cases[mr - 1].function[i] = (xnn_gemm_ukernel_fn) gemm_kernel;
}
}
}
void xnn_overwrite_igemm_cases_with_generated_code(
xnn_operator_t op,
struct xnn_hmp_igemm_ukernel *igemm_cases,
size_t mr)
{
if (op->code_cache == NULL) {
return;
}
for (size_t i = 0; i < XNN_MAX_UARCH_TYPES; i++) {
const struct xnn_generated_code_chunk chunk = igemm_cases[mr - 1].generated_code_chunk[i];
const uintptr_t gemm_kernel = xnn_first_function_in_chunk_ptr(&op->code_cache->cache.code, chunk.offset, chunk.offset_end);
if (gemm_kernel == (uintptr_t) XNN_INVALID_FUNCTION_INDEX) {
xnn_log_warning("failed to finalize igemm kernel code");
continue;
}
igemm_cases[mr - 1].function[i] = (xnn_igemm_ukernel_fn) gemm_kernel;
}
}
void xnn_generate_vunary_ukernel(
const struct xnn_unary_elementwise_config* config,
xnn_operator_t op)
{
#if XNN_PLATFORM_WEB
xnn_vunary_ukernel_fn* jit_ptr = &op->ukernel.vunary.function;
if (config->generator != NULL && *jit_ptr == NULL) {
struct xnn_code_buffer b;
if (xnn_allocate_code_memory(&b, XNN_DEFAULT_CODE_BUFFER_SIZE) != xnn_status_success) {
xnn_log_warning("failed to allocate memory");
return;
}
if (config->generator(&b, config->element_tile, 0) != xnn_status_success) {
xnn_log_warning("failed to generate vunary kernel");
return;
}
if (xnn_finalize_code_memory(&b) != xnn_status_success) {
xnn_log_warning("failed to finalize vunary kernel code");
}
const uintptr_t function_index = xnn_first_function_in_chunk_ptr(&b, 0, b.size);
if (function_index == XNN_INVALID_FUNCTION_INDEX) {
xnn_log_warning("failed to finalize vunary kernel code");
return;
}
*jit_ptr = (xnn_vunary_ukernel_fn) function_index;
xnn_release_code_memory(&b);
}
#endif // XNN_PLATFORM_WEB
}
#endif // XNN_PLATFORM_JIT
void* xnn_get_pointer_to_write_weights(
xnn_operator_t op,
size_t aligned_weights_size,
int padding_byte)
{
assert(aligned_weights_size % XNN_ALLOCATION_ALIGNMENT == 0);
void* weights_ptr = NULL;
if (use_weights_cache(op)) {
weights_ptr = xnn_reserve_space_in_weights_cache(op->weights_cache, aligned_weights_size);
if (weights_ptr == NULL) {
return NULL;
}
} else {
op->packed_weights.pointer = xnn_allocate_simd_memory(aligned_weights_size);
if (op->packed_weights.pointer == NULL) {
return NULL;
}
weights_ptr = op->packed_weights.pointer;
}
memset(weights_ptr, padding_byte, aligned_weights_size);
return weights_ptr;
}
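// E.g. (illustrative) padded input 10, kernel 3, dilation 1, subsampling 2: the effective kernel is 3, so the output
// dimension is (10 - 3) / 2 + 1 = 4.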
size_t xnn_compute_convolution_output_dimension(
size_t padded_input_dimension,
size_t kernel_dimension,
size_t dilation_dimension,
size_t subsampling_dimension)
{
const size_t effective_kernel_dimension = (kernel_dimension - 1) * dilation_dimension + 1;
return doz(padded_input_dimension, effective_kernel_dimension) / subsampling_dimension + 1;
}
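// E.g. (illustrative) input 4, stride 2, kernel 3, dilation 1, no adjustment or output padding: the output dimension
// is 2 * (4 - 1) + 0 + 3 - 0 = 9.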
size_t xnn_compute_deconvolution_output_dimension(
size_t input_dimension,
size_t output_padding_dimension,
size_t adjustment_dimension,
size_t kernel_dimension,
size_t dilation_dimension,
size_t stride_dimension)
{
const size_t effective_kernel_dimension = (kernel_dimension - 1) * dilation_dimension + 1;
return doz(
stride_dimension * (input_dimension - 1) + adjustment_dimension + effective_kernel_dimension,
output_padding_dimension);
}
size_t xnn_compute_unpooling_output_dimension(
size_t input_dimension,
size_t input_padding_dimension,
size_t kernel_dimension)
{
return xnn_compute_deconvolution_output_dimension(
input_dimension, input_padding_dimension, /*adjustment_dimension=*/0,
kernel_dimension, /*dilation_dimension=*/1, /*stride_dimension=*/kernel_dimension);
}
// Calculate how much work a microkernel does.
// A MxN microkernel does M+N (scalar) loads and M*N (scalar) FMAs.
// So, given batch_size, the microkernel does:
// divide_round_up(batch_size, mr) * (mr + nr) loads, and
// divide_round_up(batch_size, mr) * (mr * nr) FMAs.
// The total cost is then a linear combination of these 2 operations. From experimental data, use a multiplier of 3 for
// loads, to prefer higher tile sizes which have better computation intensity.
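// E.g. (illustrative) batch_size = 7, mr = 4, nr = 8: divide_round_up(7, 4) = 2 tiles, so the cost is
// 2 * (3 * (4 + 8) + 4 * 8) = 136.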
static size_t calculate_microkernel_cost(size_t batch_size, uint32_t mr, uint32_t nr)
{
return divide_round_up(batch_size, mr) * (3 * (mr + nr) + mr * nr);
}
static bool mr_is_available_gemm(size_t mr, struct xnn_hmp_gemm_ukernel *gemm_cases, bool code_cache_available)
{
#if XNN_PLATFORM_JIT
if (code_cache_available) {
return gemm_cases[mr-1].generated_code_chunk[XNN_UARCH_DEFAULT].offset != XNN_CACHE_NOT_FOUND ||
gemm_cases[mr-1].function[XNN_UARCH_DEFAULT] != NULL;
}
#endif
return gemm_cases[mr-1].function[XNN_UARCH_DEFAULT] != NULL;
}
uint32_t xnn_get_heuristic_mr_gemm(
size_t batch_size, uint32_t max_mr, uint32_t nr, struct xnn_hmp_gemm_ukernel *gemm_cases, bool code_cache_available)
{
if (batch_size <= max_mr && mr_is_available_gemm(batch_size, gemm_cases, code_cache_available)) {
// We have a microkernel with MR that is the exact match with batch_size.
return batch_size;
}
// Try to find the best fitting mr.
// - use a cost heuristic to calculate how much work is done by the microkernel (see calculate_microkernel_cost)
// - smaller cost is better
uint32_t best_mr = max_mr;
size_t best_cost = SIZE_MAX;
for (uint32_t mr = 1; mr <= max_mr; mr++) {
if (!mr_is_available_gemm(mr, gemm_cases, code_cache_available)){
continue;
}
const size_t current_cost = calculate_microkernel_cost(batch_size, mr, nr);
if (current_cost <= best_cost) {
best_mr = mr;
best_cost = current_cost;
}
}
return best_mr;
}
static bool mr_is_available_igemm(size_t mr, struct xnn_hmp_igemm_ukernel *igemm_cases, bool code_cache_available)
{
#if XNN_PLATFORM_JIT
if (code_cache_available) {
return igemm_cases[mr-1].generated_code_chunk[XNN_UARCH_DEFAULT].offset != XNN_CACHE_NOT_FOUND ||
igemm_cases[mr-1].function[XNN_UARCH_DEFAULT] != NULL;
}
#endif
return igemm_cases[mr-1].function[XNN_UARCH_DEFAULT] != NULL;
}
uint32_t xnn_get_heuristic_mr_igemm(
size_t batch_size, uint32_t max_mr, uint32_t nr, struct xnn_hmp_igemm_ukernel *igemm_cases,
bool code_cache_available)
{
if (batch_size <= max_mr && mr_is_available_igemm(batch_size, igemm_cases, code_cache_available)) {
// We have a microkernel with MR that is the exact match with batch_size.
return batch_size;
}
// Try to find the best fitting mr.
// - use a cost heuristic to calculate how much work is done by the microkernel (see calculate_microkernel_cost)
// - smaller cost is better
uint32_t best_mr = max_mr;
size_t best_cost = SIZE_MAX;
for (uint32_t mr = 1; mr <= max_mr; mr++) {
if (!mr_is_available_igemm(mr, igemm_cases, code_cache_available)){
continue;
}
const size_t current_cost = calculate_microkernel_cost(batch_size, mr, nr);
if (current_cost <= best_cost) {
best_mr = mr;
best_cost = current_cost;
}
}
return best_mr;
}
XNNPACK | XNNPACK-master/src/pavgpool-config.c
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <pthread.h>
#endif
#include <xnnpack/common.h>
#include <xnnpack/config.h>
#include <xnnpack/microparams-init.h>
#include <xnnpack/pavgpool.h>
static struct xnn_pavgpool_config f16_pavgpool_config = {0};
static struct xnn_pavgpool_config f32_pavgpool_config = {0};
#if XNN_PLATFORM_WINDOWS
static INIT_ONCE init_guard_f16_pavgpool = INIT_ONCE_STATIC_INIT;
static INIT_ONCE init_guard_f32_pavgpool = INIT_ONCE_STATIC_INIT;
#else
static pthread_once_t init_guard_f16_pavgpool = PTHREAD_ONCE_INIT;
static pthread_once_t init_guard_f32_pavgpool = PTHREAD_ONCE_INIT;
#endif
static void init_f16_pavgpool_config(void) {
#if XNN_ARCH_ARM && XNN_ENABLE_ARM_FP16_VECTOR && XNN_ENABLE_ARM_FP16_SCALAR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon_fp16_arith) {
f16_pavgpool_config.unipass = (xnn_pavgpool_unipass_ukernel_fn) xnn_f16_pavgpool_minmax_ukernel_9x__neonfp16arith_c8;
f16_pavgpool_config.multipass = (xnn_pavgpool_multipass_ukernel_fn) xnn_f16_pavgpool_minmax_ukernel_9p8x__neonfp16arith_c8;
f16_pavgpool_config.init.f16 = xnn_init_f16_minmax_fp16arith_params;
f16_pavgpool_config.primary_tile = 9;
f16_pavgpool_config.incremental_tile = 8;
f16_pavgpool_config.channel_tile = 8;
}
#elif XNN_ARCH_ARM64 && XNN_ENABLE_ARM_FP16_VECTOR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon_fp16_arith) {
f16_pavgpool_config.unipass = (xnn_pavgpool_unipass_ukernel_fn) xnn_f16_pavgpool_minmax_ukernel_9x__neonfp16arith_c8;
f16_pavgpool_config.multipass = (xnn_pavgpool_multipass_ukernel_fn) xnn_f16_pavgpool_minmax_ukernel_9p8x__neonfp16arith_c8;
f16_pavgpool_config.init.f16 = xnn_init_f16_minmax_fp16arith_params;
f16_pavgpool_config.primary_tile = 9;
f16_pavgpool_config.incremental_tile = 8;
f16_pavgpool_config.channel_tile = 8;
}
#elif (XNN_ARCH_X86 || XNN_ARCH_X86_64) && !XNN_PLATFORM_MOBILE
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_x86_avx2) {
f16_pavgpool_config.unipass = (xnn_pavgpool_unipass_ukernel_fn) xnn_f16_pavgpool_minmax_ukernel_9x__avx2_c8;
f16_pavgpool_config.multipass = (xnn_pavgpool_multipass_ukernel_fn) xnn_f16_pavgpool_minmax_ukernel_9p8x__avx2_c8;
f16_pavgpool_config.init.f16 = xnn_init_f16_minmax_avx_params;
f16_pavgpool_config.primary_tile = 9;
f16_pavgpool_config.incremental_tile = 8;
f16_pavgpool_config.channel_tile = 8;
}
#endif
}
static void init_f32_pavgpool_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
f32_pavgpool_config.unipass = (xnn_pavgpool_unipass_ukernel_fn) xnn_f32_pavgpool_minmax_ukernel_9x__neon_c4;
f32_pavgpool_config.multipass = (xnn_pavgpool_multipass_ukernel_fn) xnn_f32_pavgpool_minmax_ukernel_9p8x__neon_c4;
f32_pavgpool_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_pavgpool_config.primary_tile = 9;
f32_pavgpool_config.incremental_tile = 8;
f32_pavgpool_config.channel_tile = 4;
} else if (!XNN_PLATFORM_MOBILE) {
f32_pavgpool_config.unipass = (xnn_pavgpool_unipass_ukernel_fn) xnn_f32_pavgpool_minmax_ukernel_9x__scalar_c1;
f32_pavgpool_config.multipass = (xnn_pavgpool_multipass_ukernel_fn) xnn_f32_pavgpool_minmax_ukernel_9p8x__scalar_c1;
f32_pavgpool_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_pavgpool_config.primary_tile = 9;
f32_pavgpool_config.incremental_tile = 8;
f32_pavgpool_config.channel_tile = 1;
}
#elif XNN_ARCH_ARM64
f32_pavgpool_config.unipass = (xnn_pavgpool_unipass_ukernel_fn) xnn_f32_pavgpool_minmax_ukernel_9x__neon_c4;
f32_pavgpool_config.multipass = (xnn_pavgpool_multipass_ukernel_fn) xnn_f32_pavgpool_minmax_ukernel_9p8x__neon_c4;
f32_pavgpool_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_pavgpool_config.primary_tile = 9;
f32_pavgpool_config.incremental_tile = 8;
f32_pavgpool_config.channel_tile = 4;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
f32_pavgpool_config.unipass = (xnn_pavgpool_unipass_ukernel_fn) xnn_f32_pavgpool_minmax_ukernel_9x__sse_c4;
f32_pavgpool_config.multipass = (xnn_pavgpool_multipass_ukernel_fn) xnn_f32_pavgpool_minmax_ukernel_9p8x__sse_c4;
f32_pavgpool_config.init.f32 = xnn_init_f32_minmax_sse_params;
f32_pavgpool_config.primary_tile = 9;
f32_pavgpool_config.incremental_tile = 8;
f32_pavgpool_config.channel_tile = 4;
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->is_x86) {
f32_pavgpool_config.unipass = (xnn_pavgpool_unipass_ukernel_fn) xnn_f32_pavgpool_minmax_ukernel_9x__wasmsimd_x86_c4;
f32_pavgpool_config.multipass = (xnn_pavgpool_multipass_ukernel_fn) xnn_f32_pavgpool_minmax_ukernel_9p8x__wasmsimd_x86_c4;
f32_pavgpool_config.init.f32 = xnn_init_f32_minmax_wasmsimd_params;
f32_pavgpool_config.primary_tile = 9;
f32_pavgpool_config.incremental_tile = 8;
f32_pavgpool_config.channel_tile = 4;
} else {
f32_pavgpool_config.unipass = (xnn_pavgpool_unipass_ukernel_fn) xnn_f32_pavgpool_minmax_ukernel_9x__wasmsimd_arm_c4;
f32_pavgpool_config.multipass = (xnn_pavgpool_multipass_ukernel_fn) xnn_f32_pavgpool_minmax_ukernel_9p8x__wasmsimd_arm_c4;
f32_pavgpool_config.init.f32 = xnn_init_f32_minmax_wasmsimd_params;
f32_pavgpool_config.primary_tile = 9;
f32_pavgpool_config.incremental_tile = 8;
f32_pavgpool_config.channel_tile = 4;
}
#elif XNN_ARCH_WASM
f32_pavgpool_config.unipass = (xnn_pavgpool_unipass_ukernel_fn) xnn_f32_pavgpool_minmax_ukernel_9x__wasm_c1;
f32_pavgpool_config.multipass = (xnn_pavgpool_multipass_ukernel_fn) xnn_f32_pavgpool_minmax_ukernel_9p8x__wasm_c1;
f32_pavgpool_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_pavgpool_config.primary_tile = 9;
f32_pavgpool_config.incremental_tile = 8;
f32_pavgpool_config.channel_tile = 1;
#elif XNN_ARCH_RISCV
f32_pavgpool_config.unipass = (xnn_pavgpool_unipass_ukernel_fn) xnn_f32_pavgpool_minmax_ukernel_9x__scalar_c1;
f32_pavgpool_config.multipass = (xnn_pavgpool_multipass_ukernel_fn) xnn_f32_pavgpool_minmax_ukernel_9p8x__scalar_c1;
f32_pavgpool_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_pavgpool_config.primary_tile = 9;
f32_pavgpool_config.incremental_tile = 8;
f32_pavgpool_config.channel_tile = 1;
#endif
}
#if XNN_PLATFORM_WINDOWS
static BOOL CALLBACK init_f16_pavgpool_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f16_pavgpool_config();
return TRUE;
}
static BOOL CALLBACK init_f32_pavgpool_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f32_pavgpool_config();
return TRUE;
}
#endif
const struct xnn_pavgpool_config* xnn_init_f16_pavgpool_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL || !xnn_is_f16_compatible_config(hardware_config)) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f16_pavgpool, &init_f16_pavgpool_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f16_pavgpool, &init_f16_pavgpool_config);
#endif
return &f16_pavgpool_config;
}
const struct xnn_pavgpool_config* xnn_init_f32_pavgpool_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f32_pavgpool, &init_f32_pavgpool_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f32_pavgpool, &init_f32_pavgpool_config);
#endif
return &f32_pavgpool_config;
}
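// --- Illustrative usage sketch (not part of the original file) ---
// A minimal example of querying the f32 PAVGPOOL config returned by
// xnn_init_f32_pavgpool_config() above. The printf scaffolding is purely for
// illustration; primary_tile/incremental_tile describe how many pooling
// elements the unipass and multipass microkernels consume per invocation.
// Guarded with #if 0 so it does not affect this translation unit.
#if 0
#include <stdio.h>

static void print_f32_pavgpool_tiles(void) {
  const struct xnn_pavgpool_config* config = xnn_init_f32_pavgpool_config();
  if (config == NULL) {
    fprintf(stderr, "no hardware configuration available\n");
    return;
  }
  printf("primary_tile=%u incremental_tile=%u channel_tile=%u\n",
         (unsigned) config->primary_tile,
         (unsigned) config->incremental_tile,
         (unsigned) config->channel_tile);
}
#endif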
| 8,472 | 47.695402 | 129 | c |
| XNNPACK | XNNPACK-master/src/prelu-config.c |
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <pthread.h>
#endif
#include <xnnpack/common.h>
#include <xnnpack/config.h>
#include <xnnpack/microparams-init.h>
#include <xnnpack/prelu.h>
static struct xnn_prelu_config f16_prelu_config = {0};
static struct xnn_prelu_config f32_prelu_config = {0};
#if XNN_PLATFORM_WINDOWS
static INIT_ONCE init_guard_f16_prelu = INIT_ONCE_STATIC_INIT;
static INIT_ONCE init_guard_f32_prelu = INIT_ONCE_STATIC_INIT;
#else
static pthread_once_t init_guard_f16_prelu = PTHREAD_ONCE_INIT;
static pthread_once_t init_guard_f32_prelu = PTHREAD_ONCE_INIT;
#endif
static void init_f16_prelu_config(void) {
#if XNN_ARCH_ARM && XNN_ENABLE_ARM_FP16_VECTOR && XNN_ENABLE_ARM_FP16_SCALAR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon_fp16_arith) {
f16_prelu_config.ukernel = (xnn_prelu_ukernel_fn) xnn_f16_prelu_ukernel__neonfp16arith_2x16;
f16_prelu_config.row_tile = 2;
f16_prelu_config.channel_tile = 16;
}
#elif XNN_ARCH_ARM64 && XNN_ENABLE_ARM_FP16_VECTOR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon_fp16_arith) {
f16_prelu_config.ukernel = (xnn_prelu_ukernel_fn) xnn_f16_prelu_ukernel__neonfp16arith_2x16;
f16_prelu_config.row_tile = 2;
f16_prelu_config.channel_tile = 16;
}
#elif (XNN_ARCH_X86 || XNN_ARCH_X86_64) && !XNN_PLATFORM_MOBILE
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_x86_avx2) {
f16_prelu_config.ukernel = (xnn_prelu_ukernel_fn) xnn_f16_prelu_ukernel__f16c_2x16;
f16_prelu_config.row_tile = 2;
f16_prelu_config.channel_tile = 16;
}
#endif
}
static void init_f32_prelu_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
f32_prelu_config.ukernel = (xnn_prelu_ukernel_fn) xnn_f32_prelu_ukernel__neon_2x8;
f32_prelu_config.row_tile = 2;
f32_prelu_config.channel_tile = 8;
} else if (!XNN_PLATFORM_MOBILE) {
f32_prelu_config.ukernel = (xnn_prelu_ukernel_fn) xnn_f32_prelu_ukernel__scalar_2x4;
f32_prelu_config.row_tile = 4;
f32_prelu_config.channel_tile = 4;
}
#elif XNN_ARCH_ARM64
f32_prelu_config.ukernel = (xnn_prelu_ukernel_fn) xnn_f32_prelu_ukernel__neon_2x8;
f32_prelu_config.row_tile = 2;
f32_prelu_config.channel_tile = 8;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (!XNN_PLATFORM_MOBILE && hardware_config->use_x86_avx512f) {
f32_prelu_config.ukernel = (xnn_prelu_ukernel_fn) xnn_f32_prelu_ukernel__avx512f_2x16;
f32_prelu_config.row_tile = 2;
f32_prelu_config.channel_tile = 16;
} else if (hardware_config->use_x86_avx) {
f32_prelu_config.ukernel = (xnn_prelu_ukernel_fn) xnn_f32_prelu_ukernel__avx_2x16;
f32_prelu_config.row_tile = 2;
f32_prelu_config.channel_tile = 16;
} else if (hardware_config->use_x86_sse4_1) {
f32_prelu_config.ukernel = (xnn_prelu_ukernel_fn) xnn_f32_prelu_ukernel__sse41_2x8;
f32_prelu_config.row_tile = 2;
f32_prelu_config.channel_tile = 8;
} else {
f32_prelu_config.ukernel = (xnn_prelu_ukernel_fn) xnn_f32_prelu_ukernel__sse2_2x8;
f32_prelu_config.row_tile = 2;
f32_prelu_config.channel_tile = 8;
}
#elif XNN_ARCH_WASMRELAXEDSIMD
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->is_x86) {
f32_prelu_config.ukernel = (xnn_prelu_ukernel_fn) xnn_f32_prelu_ukernel__wasmrelaxedsimd_iminmax_2x4;
f32_prelu_config.row_tile = 2;
f32_prelu_config.channel_tile = 4;
} else {
f32_prelu_config.ukernel = (xnn_prelu_ukernel_fn) xnn_f32_prelu_ukernel__wasmrelaxedsimd_laneselect_2x4;
f32_prelu_config.row_tile = 2;
f32_prelu_config.channel_tile = 4;
}
#elif XNN_ARCH_WASMSIMD
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->is_x86) {
f32_prelu_config.ukernel = (xnn_prelu_ukernel_fn) xnn_f32_prelu_ukernel__wasmsimd_iminmax_2x8;
f32_prelu_config.row_tile = 2;
f32_prelu_config.channel_tile = 8;
} else {
f32_prelu_config.ukernel = (xnn_prelu_ukernel_fn) xnn_f32_prelu_ukernel__wasmsimd_laneselect_2x8;
f32_prelu_config.row_tile = 2;
f32_prelu_config.channel_tile = 8;
}
#elif XNN_ARCH_WASM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->is_x86) {
f32_prelu_config.ukernel = (xnn_prelu_ukernel_fn) xnn_f32_prelu_ukernel__scalar_2x4;
f32_prelu_config.row_tile = 2;
f32_prelu_config.channel_tile = 4;
} else {
f32_prelu_config.ukernel = (xnn_prelu_ukernel_fn) xnn_f32_prelu_ukernel__wasm_2x4;
f32_prelu_config.row_tile = 2;
f32_prelu_config.channel_tile = 4;
}
#elif XNN_ARCH_RISCV
f32_prelu_config.ukernel = (xnn_prelu_ukernel_fn) xnn_f32_prelu_ukernel__scalar_2x4;
f32_prelu_config.row_tile = 4;
f32_prelu_config.channel_tile = 4;
#endif
}
#if XNN_PLATFORM_WINDOWS
static BOOL CALLBACK init_f16_prelu_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f16_prelu_config();
return TRUE;
}
static BOOL CALLBACK init_f32_prelu_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f32_prelu_config();
return TRUE;
}
#endif
const struct xnn_prelu_config* xnn_init_f16_prelu_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL || !xnn_is_f16_compatible_config(hardware_config)) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f16_prelu, &init_f16_prelu_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f16_prelu, &init_f16_prelu_config);
#endif
return &f16_prelu_config;
}
const struct xnn_prelu_config* xnn_init_f32_prelu_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f32_prelu, &init_f32_prelu_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f32_prelu, &init_f32_prelu_config);
#endif
return &f32_prelu_config;
}
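// --- Illustrative usage sketch (not part of the original file) ---
// Hedged example of reading the f32 PReLU config selected above. row_tile and
// channel_tile describe the 2-D tile (rows x channels) that each microkernel
// call processes; the helper below only inspects the selected row tile.
#if 0
static size_t f32_prelu_rows_per_call(void) {
  const struct xnn_prelu_config* config = xnn_init_f32_prelu_config();
  return config == NULL ? 0 : (size_t) config->row_tile;
}
#endif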
| 7,063 | 38.909605 | 110 | c |
| XNNPACK | XNNPACK-master/src/raddstoreexpminusmax-config.c |
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <pthread.h>
#endif
#include <xnnpack/common.h>
#include <xnnpack/config.h>
#include <xnnpack/microparams-init.h>
#include <xnnpack/raddstoreexpminusmax.h>
static struct xnn_raddstoreexpminusmax_config f16_raddstoreexpminusmax_config = {0};
static struct xnn_raddstoreexpminusmax_config f32_raddstoreexpminusmax_config = {0};
#if XNN_PLATFORM_WINDOWS
static INIT_ONCE init_guard_f16_raddstoreexpminusmax = INIT_ONCE_STATIC_INIT;
static INIT_ONCE init_guard_f32_raddstoreexpminusmax = INIT_ONCE_STATIC_INIT;
#else
static pthread_once_t init_guard_f16_raddstoreexpminusmax = PTHREAD_ONCE_INIT;
static pthread_once_t init_guard_f32_raddstoreexpminusmax = PTHREAD_ONCE_INIT;
#endif
static void init_f16_raddstoreexpminusmax_config(void) {
#if XNN_ARCH_ARM && XNN_ENABLE_ARM_FP16_VECTOR && XNN_ENABLE_ARM_FP16_SCALAR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon_fp16_arith) {
f16_raddstoreexpminusmax_config.ukernel =
(xnn_raddstoreexpminusmax_ukernel_fn) xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x32;
f16_raddstoreexpminusmax_config.init.f16 = xnn_init_f16_expminus_fp16arith_rr2_p2_params;
f16_raddstoreexpminusmax_config.element_tile = 32;
}
#elif XNN_ARCH_ARM64 && XNN_ENABLE_ARM_FP16_VECTOR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon_fp16_arith) {
f16_raddstoreexpminusmax_config.ukernel =
(xnn_raddstoreexpminusmax_ukernel_fn) xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x40;
f16_raddstoreexpminusmax_config.init.f16 = xnn_init_f16_expminus_fp16arith_rr2_p2_params;
f16_raddstoreexpminusmax_config.element_tile = 40;
}
#elif (XNN_ARCH_X86 || XNN_ARCH_X86_64) && !XNN_PLATFORM_MOBILE
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_x86_avx2) {
f16_raddstoreexpminusmax_config.ukernel =
(xnn_raddstoreexpminusmax_ukernel_fn) xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x40;
f16_raddstoreexpminusmax_config.init.f16 = xnn_init_f16_expminus_avx2_rr1_p2_params;
f16_raddstoreexpminusmax_config.element_tile = 40;
}
#endif
}
static void init_f32_raddstoreexpminusmax_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
f32_raddstoreexpminusmax_config.ukernel =
(xnn_raddstoreexpminusmax_ukernel_fn) xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x8;
f32_raddstoreexpminusmax_config.init.f32 = xnn_init_f32_expminus_neon_rr2_lut64_p2_params;
f32_raddstoreexpminusmax_config.element_tile = 8;
} else if (!XNN_PLATFORM_MOBILE) {
f32_raddstoreexpminusmax_config.ukernel =
(xnn_raddstoreexpminusmax_ukernel_fn) xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_p5_x4_acc2;
f32_raddstoreexpminusmax_config.init.f32 = xnn_init_f32_expminus_scalar_rr2_p5_params;
f32_raddstoreexpminusmax_config.element_tile = 4;
}
#elif XNN_ARCH_ARM64
f32_raddstoreexpminusmax_config.ukernel =
(xnn_raddstoreexpminusmax_ukernel_fn) xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_lut64_p2_x16;
f32_raddstoreexpminusmax_config.init.f32 = xnn_init_f32_expminus_neonfma_rr1_lut64_p2_params;
f32_raddstoreexpminusmax_config.element_tile = 16;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
f32_raddstoreexpminusmax_config.ukernel =
(xnn_raddstoreexpminusmax_ukernel_fn) xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x20_acc2;
f32_raddstoreexpminusmax_config.init.f32 = xnn_init_f32_expminus_sse2_rr2_p5_params;
f32_raddstoreexpminusmax_config.element_tile = 20;
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
#if XNN_ARCH_WASMRELAXEDSIMD
f32_raddstoreexpminusmax_config.ukernel =
(xnn_raddstoreexpminusmax_ukernel_fn) xnn_f32_raddstoreexpminusmax_ukernel__wasmrelaxedsimd_rr2_p5_x16_acc2;
f32_raddstoreexpminusmax_config.init.f32 = xnn_init_f32_expminus_wasmsimd_rr2_p5_params;
f32_raddstoreexpminusmax_config.element_tile = 16;
#else
f32_raddstoreexpminusmax_config.ukernel =
(xnn_raddstoreexpminusmax_ukernel_fn) xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x16_acc2;
f32_raddstoreexpminusmax_config.init.f32 = xnn_init_f32_expminus_wasmsimd_rr2_p5_params;
f32_raddstoreexpminusmax_config.element_tile = 16;
#endif
#elif XNN_ARCH_WASM
f32_raddstoreexpminusmax_config.ukernel =
(xnn_raddstoreexpminusmax_ukernel_fn) xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_p5_x4_acc2;
f32_raddstoreexpminusmax_config.init.f32 = xnn_init_f32_expminus_scalar_rr2_p5_params;
f32_raddstoreexpminusmax_config.element_tile = 4;
#elif XNN_ARCH_RISCV
f32_raddstoreexpminusmax_config.ukernel =
(xnn_raddstoreexpminusmax_ukernel_fn) xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_p5_x4_acc2;
f32_raddstoreexpminusmax_config.init.f32 = xnn_init_f32_expminus_scalar_rr2_p5_params;
f32_raddstoreexpminusmax_config.element_tile = 4;
#endif
}
#if XNN_PLATFORM_WINDOWS
static BOOL CALLBACK init_f16_raddstoreexpminusmax_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f16_raddstoreexpminusmax_config();
return TRUE;
}
static BOOL CALLBACK init_f32_raddstoreexpminusmax_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f32_raddstoreexpminusmax_config();
return TRUE;
}
#endif
static bool is_f16_compatible_config(const struct xnn_hardware_config hardware_config[restrict XNN_MIN_ELEMENTS(1)]) {
#if (XNN_ARCH_ARM && XNN_ENABLE_ARM_FP16_VECTOR && XNN_ENABLE_ARM_FP16_SCALAR) || (XNN_ARCH_ARM64 && XNN_ENABLE_ARM_FP16_VECTOR)
return hardware_config->use_arm_neon_fp16_arith;
#elif (XNN_ARCH_X86 || XNN_ARCH_X86_64) && !XNN_PLATFORM_MOBILE
return hardware_config->use_x86_avx2;
#else
return false;
#endif
}
const struct xnn_raddstoreexpminusmax_config* xnn_init_f16_raddstoreexpminusmax_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL || !is_f16_compatible_config(hardware_config)) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f16_raddstoreexpminusmax, &init_f16_raddstoreexpminusmax_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f16_raddstoreexpminusmax, &init_f16_raddstoreexpminusmax_config);
#endif
return &f16_raddstoreexpminusmax_config;
}
const struct xnn_raddstoreexpminusmax_config* xnn_init_f32_raddstoreexpminusmax_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f32_raddstoreexpminusmax, &init_f32_raddstoreexpminusmax_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f32_raddstoreexpminusmax, &init_f32_raddstoreexpminusmax_config);
#endif
return &f32_raddstoreexpminusmax_config;
}
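// --- Conceptual reference (not part of the original file, not the XNNPACK API) ---
// The microkernels selected above compute, for a softmax-style reduction,
// y[i] = exp(x[i] - x_max) while accumulating sum = Σ y[i]. The scalar sketch
// below restates that contract in plain C for clarity; the real microkernels
// vectorize it with the element_tile granularity configured above.
#if 0
#include <math.h>

static float reference_raddstoreexpminusmax(
    const float* x, float* y, size_t n, float x_max) {
  float sum = 0.0f;
  for (size_t i = 0; i < n; i++) {
    y[i] = expf(x[i] - x_max);
    sum += y[i];
  }
  return sum;
}
#endif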
| 7,580 | 46.38125 | 130 | c |
| XNNPACK | XNNPACK-master/src/reduce-config.c |
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <pthread.h>
#endif
#include <xnnpack/common.h>
#include <xnnpack/config.h>
#include <xnnpack/microparams-init.h>
#include <xnnpack/reduce.h>
static struct xnn_reduce_config f16_f32acc_rsum_config = {0};
static struct xnn_reduce_config f32_rminmax_config = {0};
static struct xnn_reduce_config f32_rsum_config = {0};
#if XNN_PLATFORM_WINDOWS
static INIT_ONCE init_guard_f16_f32acc_rsum = INIT_ONCE_STATIC_INIT;
static INIT_ONCE init_guard_f32_rminmax = INIT_ONCE_STATIC_INIT;
static INIT_ONCE init_guard_f32_rsum = INIT_ONCE_STATIC_INIT;
#else
static pthread_once_t init_guard_f16_f32acc_rsum = PTHREAD_ONCE_INIT;
static pthread_once_t init_guard_f32_rminmax = PTHREAD_ONCE_INIT;
static pthread_once_t init_guard_f32_rsum = PTHREAD_ONCE_INIT;
#endif
static void init_f16_f32acc_rsum_config(void) {
#if XNN_ARCH_ARM && XNN_ENABLE_ARM_FP16_VECTOR && XNN_ENABLE_ARM_FP16_SCALAR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon_fp16_arith) {
f16_f32acc_rsum_config = (struct xnn_reduce_config) {
.ukernel = (xnn_reduce_ukernel_fn) xnn_f16_f32acc_rsum_ukernel__neonfp16_x32_acc4,
.init.f16_f32acc_scale = xnn_init_f16_f32acc_scale_scalar_params,
.element_tile = 32,
};
}
#elif XNN_ARCH_ARM64 && XNN_ENABLE_ARM_FP16_VECTOR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon_fp16_arith) {
f16_f32acc_rsum_config = (struct xnn_reduce_config) {
.ukernel = (xnn_reduce_ukernel_fn) xnn_f16_f32acc_rsum_ukernel__neonfp16_x32_acc4,
.init.f16_f32acc_scale = xnn_init_f16_f32acc_scale_scalar_params,
.element_tile = 32,
};
}
#elif (XNN_ARCH_X86 || XNN_ARCH_X86_64) && !XNN_PLATFORM_MOBILE
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_x86_f16c) {
f16_f32acc_rsum_config = (struct xnn_reduce_config) {
.ukernel = (xnn_reduce_ukernel_fn) xnn_f16_f32acc_rsum_ukernel__f16c_x32_acc4,
.init.f16_f32acc_scale = xnn_init_f16_f32acc_scale_avx_params,
.element_tile = 32,
};
}
#endif
}
static void init_f32_rminmax_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
f32_rminmax_config = (struct xnn_reduce_config) {
.ukernel = (xnn_reduce_ukernel_fn) xnn_f32_rminmax_ukernel__neon_x16_acc4,
.init.f32_scale = xnn_init_f32_scale_scalar_params,
.element_tile = 16,
};
} else if (!XNN_PLATFORM_MOBILE) {
f32_rminmax_config = (struct xnn_reduce_config) {
.ukernel = (xnn_reduce_ukernel_fn) xnn_f32_rminmax_ukernel__scalar_x4_acc4,
.init.f32_scale = xnn_init_f32_scale_scalar_params,
.element_tile = 4,
};
}
#elif XNN_ARCH_ARM64
f32_rminmax_config = (struct xnn_reduce_config) {
.ukernel = (xnn_reduce_ukernel_fn) xnn_f32_rminmax_ukernel__neon_x16_acc4,
.init.f32_scale = xnn_init_f32_scale_scalar_params,
.element_tile = 16,
};
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
f32_rminmax_config = (struct xnn_reduce_config) {
.ukernel = (xnn_reduce_ukernel_fn) xnn_f32_rminmax_ukernel__sse_x16_acc4,
.init.f32_scale = xnn_init_f32_scale_scalar_params,
.element_tile = 16,
};
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
f32_rminmax_config = (struct xnn_reduce_config) {
.ukernel = (xnn_reduce_ukernel_fn) xnn_f32_rminmax_ukernel__wasmsimd_minmax_x16_acc4,
.init.f32_scale = xnn_init_f32_scale_scalar_params,
.element_tile = 16,
};
#elif XNN_ARCH_WASM
f32_rminmax_config = (struct xnn_reduce_config) {
.ukernel = (xnn_reduce_ukernel_fn) xnn_f32_rminmax_ukernel__wasm_x4_acc4,
.init.f32_scale = xnn_init_f32_scale_scalar_params,
.element_tile = 4,
};
#elif XNN_ARCH_RISCV
f32_rminmax_config = (struct xnn_reduce_config) {
.ukernel = (xnn_reduce_ukernel_fn) xnn_f32_rminmax_ukernel__scalar_x4_acc4,
.init.f32_scale = xnn_init_f32_scale_scalar_params,
.element_tile = 4,
};
#endif
}
static void init_f32_rsum_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
f32_rsum_config = (struct xnn_reduce_config) {
.ukernel = (xnn_reduce_ukernel_fn) xnn_f32_rsum_ukernel__neon_x16_acc4,
.init.f32_scale = xnn_init_f32_scale_scalar_params,
.element_tile = 16,
};
} else if (!XNN_PLATFORM_MOBILE) {
f32_rsum_config = (struct xnn_reduce_config) {
.ukernel = (xnn_reduce_ukernel_fn) xnn_f32_rsum_ukernel__scalar_x4_acc4,
.init.f32_scale = xnn_init_f32_scale_scalar_params,
.element_tile = 4,
};
}
#elif XNN_ARCH_ARM64
f32_rsum_config = (struct xnn_reduce_config) {
.ukernel = (xnn_reduce_ukernel_fn) xnn_f32_rsum_ukernel__neon_x16_acc4,
.init.f32_scale = xnn_init_f32_scale_scalar_params,
.element_tile = 16,
};
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_x86_avx) {
f32_rsum_config = (struct xnn_reduce_config) {
.ukernel = (xnn_reduce_ukernel_fn) xnn_f32_rsum_ukernel__avx_x32_acc4,
.init.f32_scale = xnn_init_f32_scale_avx_params,
.element_tile = 32,
};
} else {
f32_rsum_config = (struct xnn_reduce_config) {
.ukernel = (xnn_reduce_ukernel_fn) xnn_f32_rsum_ukernel__sse_x16_acc4,
.init.f32_scale = xnn_init_f32_scale_scalar_params,
.element_tile = 16,
};
}
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
f32_rsum_config = (struct xnn_reduce_config) {
.ukernel = (xnn_reduce_ukernel_fn) xnn_f32_rsum_ukernel__wasmsimd_x16_acc4,
.init.f32_scale = xnn_init_f32_scale_scalar_params,
.element_tile = 16,
};
#elif XNN_ARCH_WASM
f32_rsum_config = (struct xnn_reduce_config) {
.ukernel = (xnn_reduce_ukernel_fn) xnn_f32_rsum_ukernel__scalar_x4_acc4,
.init.f32_scale = xnn_init_f32_scale_scalar_params,
.element_tile = 4,
};
#elif XNN_ARCH_RISCV
f32_rsum_config = (struct xnn_reduce_config) {
.ukernel = (xnn_reduce_ukernel_fn) xnn_f32_rsum_ukernel__scalar_x4_acc4,
.init.f32_scale = xnn_init_f32_scale_scalar_params,
.element_tile = 4,
};
#endif
}
#if XNN_PLATFORM_WINDOWS
static BOOL CALLBACK init_f16_f32acc_rsum_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f16_f32acc_rsum_config();
return TRUE;
}
static BOOL CALLBACK init_f32_rminmax_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f32_rminmax_config();
return TRUE;
}
static BOOL CALLBACK init_f32_rsum_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f32_rsum_config();
return TRUE;
}
#endif
const struct xnn_reduce_config* xnn_init_f16_f32acc_rsum_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL || !xnn_is_f16_compatible_config(hardware_config)) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f16_f32acc_rsum, &init_f16_f32acc_rsum_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f16_f32acc_rsum, &init_f16_f32acc_rsum_config);
#endif
return &f16_f32acc_rsum_config;
}
const struct xnn_reduce_config* xnn_init_f32_rminmax_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f32_rminmax, &init_f32_rminmax_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f32_rminmax, &init_f32_rminmax_config);
#endif
return &f32_rminmax_config;
}
const struct xnn_reduce_config* xnn_init_f32_rsum_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f32_rsum, &init_f32_rsum_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f32_rsum, &init_f32_rsum_config);
#endif
return &f32_rsum_config;
}
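// --- Illustrative usage sketch (not part of the original file) ---
// Hedged example of querying the f32 RMINMAX config selected above; the
// element_tile field is the number of elements each microkernel invocation
// reduces before its accumulators are combined.
#if 0
static size_t f32_rminmax_element_tile(void) {
  const struct xnn_reduce_config* config = xnn_init_f32_rminmax_config();
  return config == NULL ? 0 : (size_t) config->element_tile;
}
#endif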
| 8,973 | 37.350427 | 115 | c |
| XNNPACK | XNNPACK-master/src/rmax-config.c |
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <pthread.h>
#endif
#include <xnnpack/common.h>
#include <xnnpack/config.h>
#include <xnnpack/microparams-init.h>
#include <xnnpack/rmax.h>
static struct xnn_rmax_config f16_rmax_config = {0};
static struct xnn_rmax_config f32_rmax_config = {0};
static struct xnn_rmax_config u8_rmax_config = {0};
#if XNN_PLATFORM_WINDOWS
static INIT_ONCE init_guard_f16_rmax = INIT_ONCE_STATIC_INIT;
static INIT_ONCE init_guard_f32_rmax = INIT_ONCE_STATIC_INIT;
static INIT_ONCE init_guard_u8_rmax = INIT_ONCE_STATIC_INIT;
#else
static pthread_once_t init_guard_f16_rmax = PTHREAD_ONCE_INIT;
static pthread_once_t init_guard_f32_rmax = PTHREAD_ONCE_INIT;
static pthread_once_t init_guard_u8_rmax = PTHREAD_ONCE_INIT;
#endif
static void init_f16_rmax_config(void) {
#if XNN_ARCH_ARM && XNN_ENABLE_ARM_FP16_VECTOR && XNN_ENABLE_ARM_FP16_SCALAR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon_fp16_arith) {
f16_rmax_config.rmax.f16 = (xnn_rmax_ukernel_fn) xnn_f16_rmax_ukernel__neonfp16arith;
}
#elif XNN_ARCH_ARM64 && XNN_ENABLE_ARM_FP16_VECTOR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon_fp16_arith) {
f16_rmax_config.rmax.f16 = (xnn_rmax_ukernel_fn) xnn_f16_rmax_ukernel__neonfp16arith;
}
#elif (XNN_ARCH_X86 || XNN_ARCH_X86_64) && !XNN_PLATFORM_MOBILE
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_x86_avx2) {
f16_rmax_config.rmax.f16 = (xnn_rmax_ukernel_fn) xnn_f16_rmax_ukernel__f16c;
}
#endif
}
static void init_f32_rmax_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
f32_rmax_config.rmax.f32 = (xnn_rmax_ukernel_fn) xnn_f32_rmax_ukernel__neon;
} else if (!XNN_PLATFORM_MOBILE) {
f32_rmax_config.rmax.f32 = (xnn_rmax_ukernel_fn) xnn_f32_rmax_ukernel__scalar;
}
#elif XNN_ARCH_ARM64
f32_rmax_config.rmax.f32 = (xnn_rmax_ukernel_fn) xnn_f32_rmax_ukernel__neon;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
f32_rmax_config.rmax.f32 = (xnn_rmax_ukernel_fn) xnn_f32_rmax_ukernel__sse;
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->is_x86) {
f32_rmax_config.rmax.f32 = (xnn_rmax_ukernel_fn) xnn_f32_rmax_ukernel__wasmsimd_x86;
} else {
f32_rmax_config.rmax.f32 = (xnn_rmax_ukernel_fn) xnn_f32_rmax_ukernel__wasmsimd_arm;
}
#elif XNN_ARCH_WASM
f32_rmax_config.rmax.f32 = (xnn_rmax_ukernel_fn) xnn_f32_rmax_ukernel__scalar;
#elif XNN_ARCH_RISCV
f32_rmax_config.rmax.f32 = (xnn_rmax_ukernel_fn) xnn_f32_rmax_ukernel__scalar;
#endif
}
static void init_u8_rmax_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
u8_rmax_config.rmax.u8 = xnn_u8_rmax_ukernel__neon;
} else if (!XNN_PLATFORM_MOBILE) {
u8_rmax_config.rmax.u8 = xnn_u8_rmax_ukernel__scalar;
}
#elif XNN_ARCH_ARM64
u8_rmax_config.rmax.u8 = xnn_u8_rmax_ukernel__neon;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
u8_rmax_config.rmax.u8 = xnn_u8_rmax_ukernel__sse2;
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
u8_rmax_config.rmax.u8 = xnn_u8_rmax_ukernel__scalar;
#elif XNN_ARCH_WASM
u8_rmax_config.rmax.u8 = xnn_u8_rmax_ukernel__scalar;
#elif XNN_ARCH_RISCV
u8_rmax_config.rmax.u8 = xnn_u8_rmax_ukernel__scalar;
#endif
}
#if XNN_PLATFORM_WINDOWS
static BOOL CALLBACK init_f16_rmax_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f16_rmax_config();
return TRUE;
}
static BOOL CALLBACK init_f32_rmax_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f32_rmax_config();
return TRUE;
}
static BOOL CALLBACK init_u8_rmax_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_u8_rmax_config();
return TRUE;
}
#endif
const struct xnn_rmax_config* xnn_init_f16_rmax_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL || !xnn_is_f16_compatible_config(hardware_config)) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f16_rmax, &init_f16_rmax_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f16_rmax, &init_f16_rmax_config);
#endif
return &f16_rmax_config;
}
const struct xnn_rmax_config* xnn_init_f32_rmax_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f32_rmax, &init_f32_rmax_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f32_rmax, &init_f32_rmax_config);
#endif
return &f32_rmax_config;
}
const struct xnn_rmax_config* xnn_init_u8_rmax_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_u8_rmax, &init_u8_rmax_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_u8_rmax, &init_u8_rmax_config);
#endif
return &u8_rmax_config;
}
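// --- Conceptual reference (not part of the original file, not the XNNPACK API) ---
// An RMAX microkernel reduces a contiguous buffer to its maximum element
// (used, for example, to find x_max before a softmax). The scalar sketch below
// states that contract; the kernels configured above do the same with NEON,
// SSE, or WAsm SIMD.
#if 0
static float reference_f32_rmax(const float* x, size_t n) {
  // Assumes n >= 1.
  float max_value = x[0];
  for (size_t i = 1; i < n; i++) {
    max_value = x[i] > max_value ? x[i] : max_value;
  }
  return max_value;
}
#endif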
| 5,982 | 35.932099 | 108 | c |
| XNNPACK | XNNPACK-master/src/spmm-config.c |
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <pthread.h>
#endif
#include <xnnpack/common.h>
#include <xnnpack/config.h>
#include <xnnpack/microparams-init.h>
#include <xnnpack/spmm.h>
static struct xnn_spmm_config f16_spmm_config = {0};
static struct xnn_spmm_config f32_spmm_config = {0};
static struct xnn_spmm_config f32_spmm2_config = {0};
static struct xnn_spmm_config f32_spmm4_config = {0};
#if XNN_PLATFORM_WINDOWS
static INIT_ONCE init_guard_f16_spmm = INIT_ONCE_STATIC_INIT;
static INIT_ONCE init_guard_f32_spmm = INIT_ONCE_STATIC_INIT;
static INIT_ONCE init_guard_f32_spmm2 = INIT_ONCE_STATIC_INIT;
static INIT_ONCE init_guard_f32_spmm4 = INIT_ONCE_STATIC_INIT;
#else
static pthread_once_t init_guard_f16_spmm = PTHREAD_ONCE_INIT;
static pthread_once_t init_guard_f32_spmm = PTHREAD_ONCE_INIT;
static pthread_once_t init_guard_f32_spmm2 = PTHREAD_ONCE_INIT;
static pthread_once_t init_guard_f32_spmm4 = PTHREAD_ONCE_INIT;
#endif
static void init_f16_spmm_config(void) {
#if XNN_ARCH_ARM && XNN_ENABLE_ARM_FP16_VECTOR && XNN_ENABLE_ARM_FP16_SCALAR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon_fp16_arith) {
f16_spmm_config.ukernel = (xnn_spmm_ukernel_fn) xnn_f16_spmm_minmax_ukernel_32x1__neonfp16arith_pipelined;
f16_spmm_config.init.f16 = xnn_init_f16_minmax_fp16arith_params;
f16_spmm_config.mr = 32;
f16_spmm_config.nr = 1;
}
#elif XNN_ARCH_ARM64 && XNN_ENABLE_ARM_FP16_VECTOR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon_fp16_arith) {
f16_spmm_config.ukernel = (xnn_spmm_ukernel_fn) xnn_f16_spmm_minmax_ukernel_32x1__neonfp16arith_pipelined;
f16_spmm_config.init.f16 = xnn_init_f16_minmax_fp16arith_params;
f16_spmm_config.mr = 32;
f16_spmm_config.nr = 1;
}
#endif
}
static void init_f32_spmm_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
f32_spmm_config.ukernel = (xnn_spmm_ukernel_fn) xnn_f32_spmm_minmax_ukernel_32x1__neon;
f32_spmm_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_spmm_config.mr = 32;
f32_spmm_config.nr = 1;
} else if (!XNN_PLATFORM_MOBILE) {
f32_spmm_config.ukernel = (xnn_spmm_ukernel_fn) xnn_f32_spmm_minmax_ukernel_8x1__scalar;
f32_spmm_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_spmm_config.mr = 8;
f32_spmm_config.nr = 1;
}
#elif XNN_ARCH_ARM64
f32_spmm_config.ukernel = (xnn_spmm_ukernel_fn) xnn_f32_spmm_minmax_ukernel_32x1__neonfma_pipelined;
f32_spmm_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_spmm_config.mr = 32;
f32_spmm_config.nr = 1;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
f32_spmm_config.ukernel = (xnn_spmm_ukernel_fn) xnn_f32_spmm_minmax_ukernel_32x1__sse;
f32_spmm_config.init.f32 = xnn_init_f32_minmax_sse_params;
f32_spmm_config.mr = 32;
f32_spmm_config.nr = 1;
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->is_x86) {
f32_spmm_config.ukernel = (xnn_spmm_ukernel_fn) xnn_f32_spmm_minmax_ukernel_32x1__wasmsimd_x86;
f32_spmm_config.init.f32 = xnn_init_f32_minmax_wasmsimd_params;
f32_spmm_config.mr = 32;
f32_spmm_config.nr = 1;
} else {
f32_spmm_config.ukernel = (xnn_spmm_ukernel_fn) xnn_f32_spmm_minmax_ukernel_32x1__wasmsimd_arm;
f32_spmm_config.init.f32 = xnn_init_f32_minmax_wasmsimd_params;
f32_spmm_config.mr = 32;
f32_spmm_config.nr = 1;
}
#elif XNN_ARCH_WASM
f32_spmm_config.ukernel = (xnn_spmm_ukernel_fn) xnn_f32_spmm_minmax_ukernel_8x1__scalar;
f32_spmm_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_spmm_config.mr = 8;
f32_spmm_config.nr = 1;
#elif XNN_ARCH_RISCV
f32_spmm_config.ukernel = (xnn_spmm_ukernel_fn) xnn_f32_spmm_minmax_ukernel_8x1__scalar;
f32_spmm_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_spmm_config.mr = 8;
f32_spmm_config.nr = 1;
#endif
}
static void init_f32_spmm2_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
f32_spmm2_config.ukernel = (xnn_spmm_ukernel_fn) xnn_f32_spmm_minmax_ukernel_8x2__scalar;
f32_spmm2_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_spmm2_config.mr = 8;
f32_spmm2_config.nr = 2;
}
#elif XNN_ARCH_ARM64
f32_spmm2_config.ukernel = (xnn_spmm_ukernel_fn) xnn_f32_spmm_minmax_ukernel_32x2__aarch64_neonfma;
f32_spmm2_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_spmm2_config.mr = 32;
f32_spmm2_config.nr = 2;
#elif XNN_ARCH_WASM
f32_spmm2_config.ukernel = (xnn_spmm_ukernel_fn) xnn_f32_spmm_minmax_ukernel_8x2__scalar;
f32_spmm2_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_spmm2_config.mr = 8;
f32_spmm2_config.nr = 2;
#elif XNN_ARCH_RISCV
f32_spmm2_config.ukernel = (xnn_spmm_ukernel_fn) xnn_f32_spmm_minmax_ukernel_8x2__scalar;
f32_spmm2_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_spmm2_config.mr = 8;
f32_spmm2_config.nr = 2;
#endif
}
static void init_f32_spmm4_config(void) {
#if XNN_ARCH_ARM
if (!XNN_PLATFORM_MOBILE) {
f32_spmm4_config.ukernel = (xnn_spmm_ukernel_fn) xnn_f32_spmm_minmax_ukernel_8x4__scalar;
f32_spmm4_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_spmm4_config.mr = 8;
f32_spmm4_config.nr = 4;
}
#elif XNN_ARCH_ARM64
f32_spmm4_config.ukernel = (xnn_spmm_ukernel_fn) xnn_f32_spmm_minmax_ukernel_32x4__aarch64_neonfma;
f32_spmm4_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_spmm4_config.mr = 32;
f32_spmm4_config.nr = 4;
#elif XNN_ARCH_WASM
f32_spmm4_config.ukernel = (xnn_spmm_ukernel_fn) xnn_f32_spmm_minmax_ukernel_8x4__scalar;
f32_spmm4_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_spmm4_config.mr = 8;
f32_spmm4_config.nr = 4;
#elif XNN_ARCH_RISCV
f32_spmm4_config.ukernel = (xnn_spmm_ukernel_fn) xnn_f32_spmm_minmax_ukernel_8x4__scalar;
f32_spmm4_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_spmm4_config.mr = 8;
f32_spmm4_config.nr = 4;
#endif
}
#if XNN_PLATFORM_WINDOWS
static BOOL CALLBACK init_f16_spmm_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f16_spmm_config();
return TRUE;
}
static BOOL CALLBACK init_f32_spmm_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f32_spmm_config();
return TRUE;
}
static BOOL CALLBACK init_f32_spmm2_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f32_spmm2_config();
return TRUE;
}
static BOOL CALLBACK init_f32_spmm4_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f32_spmm4_config();
return TRUE;
}
#endif
const struct xnn_spmm_config* xnn_init_f16_spmm_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL || !xnn_is_f16_chw_compatible_config(hardware_config)) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f16_spmm, &init_f16_spmm_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f16_spmm, &init_f16_spmm_config);
#endif
return &f16_spmm_config;
}
const struct xnn_spmm_config* xnn_init_f32_spmm_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL || !xnn_is_chw_compatible_config(hardware_config)) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f32_spmm, &init_f32_spmm_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f32_spmm, &init_f32_spmm_config);
#endif
return &f32_spmm_config;
}
const struct xnn_spmm_config* xnn_init_f32_spmm2_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL || !xnn_is_chw_compatible_config(hardware_config)) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f32_spmm2, &init_f32_spmm2_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f32_spmm2, &init_f32_spmm2_config);
#endif
return &f32_spmm2_config;
}
const struct xnn_spmm_config* xnn_init_f32_spmm4_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL || !xnn_is_chw_compatible_config(hardware_config)) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f32_spmm4, &init_f32_spmm4_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f32_spmm4, &init_f32_spmm4_config);
#endif
return &f32_spmm4_config;
}
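// --- Illustrative usage sketch (not part of the original file) ---
// SPMM microkernels multiply a sparse weight matrix by a dense input; mr is
// the number of input elements (rows) and nr the number of output channels
// handled per invocation. The hedged helper below just reports the tile shape
// of the default f32 config selected above.
#if 0
static void f32_spmm_tile(uint32_t* mr_out, uint32_t* nr_out) {
  const struct xnn_spmm_config* config = xnn_init_f32_spmm_config();
  *mr_out = config == NULL ? 0 : config->mr;
  *nr_out = config == NULL ? 0 : config->nr;
}
#endif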
| 9,459 | 38.58159 | 112 | c |
| XNNPACK | XNNPACK-master/src/tensor.c |
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <inttypes.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <xnnpack.h>
#include <xnnpack/allocator.h>
#include <xnnpack/log.h>
#include <xnnpack/params.h>
#include <xnnpack/subgraph.h>
static void set_allocation_type(struct xnn_value* value)
{
if (value->data != NULL) {
value->allocation_type = xnn_allocation_type_static;
} else if ((value->flags & (XNN_VALUE_FLAG_EXTERNAL_INPUT | XNN_VALUE_FLAG_EXTERNAL_OUTPUT)) != 0) {
value->allocation_type = xnn_allocation_type_external;
} else if ((value->flags & XNN_VALUE_FLAG_PERSISTENT) != 0) {
value->allocation_type = xnn_allocation_type_persistent;
} else {
value->allocation_type = xnn_allocation_type_workspace;
}
}
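// Note (added for clarity): the precedence above means a value with static
// `data` is always xnn_allocation_type_static, the external input/output flags
// are only honored for values without static data, XNN_VALUE_FLAG_PERSISTENT
// is checked next, and everything else is planned into the runtime workspace.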
enum xnn_status xnn_define_tensor_value(
xnn_subgraph_t subgraph,
enum xnn_datatype datatype,
size_t num_dims,
const size_t* dims,
const void* data,
uint32_t external_id,
uint32_t flags,
uint32_t* id_out)
{
if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
xnn_log_error("failed to create Dense Tensor value: XNNPACK is not initialized");
return xnn_status_uninitialized;
}
if (external_id != XNN_INVALID_VALUE_ID && external_id >= subgraph->external_value_ids) {
xnn_log_error(
"failed to create Dense Tensor value: "
"external ID %" PRIu32 " exceeds the number of reserved external IDs in subgraph (%" PRIu32 ")",
external_id, subgraph->external_value_ids);
return xnn_status_invalid_parameter;
}
if (num_dims > XNN_MAX_TENSOR_DIMS) {
xnn_log_error("failed to create Dense Tensor value: num of dimensions exceeds XNNPACK limit (%d)",
XNN_MAX_TENSOR_DIMS);
return xnn_status_unsupported_parameter;
}
switch (datatype) {
case xnn_datatype_fp32:
case xnn_datatype_fp16:
break;
default:
xnn_log_error("failed to create Dense Tensor value: unsupported datatype %s (%d)",
xnn_datatype_to_string(datatype), datatype);
return xnn_status_unsupported_parameter;
}
struct xnn_value* value = subgraph->values + external_id;
if (external_id == XNN_INVALID_VALUE_ID) {
value = xnn_subgraph_new_internal_value(subgraph);
if (value == NULL) {
return xnn_status_out_of_memory;
}
}
value->type = xnn_value_type_dense_tensor;
value->datatype = datatype;
value->shape.num_dims = num_dims;
memcpy(value->shape.dim, dims, num_dims * sizeof(size_t));
value->size = xnn_tensor_get_size_by_id(subgraph, value->id);
value->flags = flags;
value->data = (void*) (uintptr_t) data;
set_allocation_type(value);
*id_out = value->id;
return xnn_status_success;
}
enum xnn_status xnn_define_quantized_tensor_value(
xnn_subgraph_t subgraph,
enum xnn_datatype datatype,
int32_t zero_point,
float scale,
size_t num_dims,
const size_t* dims,
const void* data,
uint32_t external_id,
uint32_t flags,
uint32_t* id_out)
{
if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
xnn_log_error("failed to create Quantized Dense Tensor value: XNNPACK is not initialized");
return xnn_status_uninitialized;
}
if (external_id != XNN_INVALID_VALUE_ID && external_id >= subgraph->external_value_ids) {
xnn_log_error(
"failed to create Quantized Dense Tensor value: "
"external ID %" PRIu32 " exceeds the number of reserved external IDs in subgraph (%" PRIu32 ")",
external_id, subgraph->external_value_ids);
return xnn_status_invalid_parameter;
}
if (num_dims > XNN_MAX_TENSOR_DIMS) {
xnn_log_error(
"failed to create Quantized Dense Tensor value: num of dimensions exceeds XNNPACK limit (%d)",
XNN_MAX_TENSOR_DIMS);
return xnn_status_unsupported_parameter;
}
switch (datatype) {
case xnn_datatype_qint8:
if ((int32_t) (int8_t) zero_point != zero_point) {
xnn_log_error(
"failed to create Quantized Dense Tensor value: invalid zero point %" PRId32" outside the [-128, 127] range",
zero_point);
return xnn_status_invalid_parameter;
}
break;
case xnn_datatype_quint8:
if ((int32_t) (uint8_t) zero_point != zero_point) {
xnn_log_error(
"failed to create Quantized Dense Tensor value: invalid zero point %" PRId32" outside the [0, 255] range",
zero_point);
return xnn_status_invalid_parameter;
}
break;
case xnn_datatype_qint32:
if (zero_point != 0) {
xnn_log_error(
"failed to create Quantized Dense Tensor value: invalid non-zero zero point %" PRId32,
zero_point);
return xnn_status_invalid_parameter;
}
break;
default:
xnn_log_error("failed to create Quantized Dense Tensor value: unsupported datatype %s (%d)",
xnn_datatype_to_string(datatype), datatype);
return xnn_status_unsupported_parameter;
}
if (scale <= 0.0f || !isnormal(scale)) {
xnn_log_error(
"failed to create Quantized Dense Tensor value with %.7g scale: scale must be finite, normalized, and positive",
scale);
return xnn_status_invalid_parameter;
}
struct xnn_value* value = subgraph->values + external_id;
if (external_id == XNN_INVALID_VALUE_ID) {
value = xnn_subgraph_new_internal_value(subgraph);
if (value == NULL) {
return xnn_status_out_of_memory;
}
}
value->type = xnn_value_type_dense_tensor;
value->datatype = datatype;
value->quantization.zero_point = zero_point;
value->quantization.scale = scale;
value->shape.num_dims = num_dims;
memcpy(value->shape.dim, dims, num_dims * sizeof(size_t));
value->size = xnn_tensor_get_size_by_id(subgraph, value->id);
value->flags = flags;
value->data = (void*) (uintptr_t) data;
set_allocation_type(value);
*id_out = value->id;
return xnn_status_success;
}
enum xnn_status xnn_define_channelwise_quantized_tensor_value(
xnn_subgraph_t subgraph,
enum xnn_datatype datatype,
const float* scale,
size_t num_dims,
size_t channel_dim,
const size_t* dims,
const void* data,
uint32_t external_id,
uint32_t flags,
uint32_t* id_out)
{
if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
xnn_log_error("failed to create Channelwise Quantized Dense Tensor value: XNNPACK is not initialized");
return xnn_status_uninitialized;
}
if (external_id != XNN_INVALID_VALUE_ID && external_id >= subgraph->external_value_ids) {
xnn_log_error(
"failed to create Channelwise Quantized Dense Tensor value: "
"external ID %" PRIu32 " exceeds the number of reserved external IDs in subgraph (%" PRIu32 ")",
external_id, subgraph->external_value_ids);
return xnn_status_invalid_parameter;
}
if (num_dims == 0) {
xnn_log_error(
"failed to create Channelwise Quantized Dense Tensor value: no channel dimension exists");
return xnn_status_invalid_parameter;
}
if (num_dims > XNN_MAX_TENSOR_DIMS) {
xnn_log_error(
"failed to create Channelwise Quantized Dense Tensor value: num of dimensions exceeds XNNPACK limit (%d)",
XNN_MAX_TENSOR_DIMS);
return xnn_status_unsupported_parameter;
}
if (channel_dim >= num_dims) {
xnn_log_error(
"failed to create Channelwise Quantized Dense Tensor value: "
"channel dimension index %zu is out of range for %zu-dimensional tensor",
channel_dim, num_dims);
return xnn_status_invalid_parameter;
}
switch (datatype) {
case xnn_datatype_qcint8:
case xnn_datatype_qcint32:
break;
default:
xnn_log_error("failed to create Channelwise Quantized Dense Tensor value: unsupported datatype %s (%d)",
xnn_datatype_to_string(datatype), datatype);
return xnn_status_unsupported_parameter;
}
const size_t channels = dims[channel_dim];
for (size_t channel = 0; channel < channels; channel++) {
if (scale[channel] <= 0.0f || !isnormal(scale[channel])) {
xnn_log_error(
"failed to create Channelwise Quantized Dense Tensor value with %.7g scale in channel #%zu: "
"scale must be finite, normalized, and positive",
scale[channel], channel);
return xnn_status_invalid_parameter;
}
}
struct xnn_value* value = subgraph->values + external_id;
if (external_id == XNN_INVALID_VALUE_ID) {
value = xnn_subgraph_new_internal_value(subgraph);
if (value == NULL) {
return xnn_status_out_of_memory;
}
}
value->type = xnn_value_type_dense_tensor;
value->datatype = datatype;
value->quantization.zero_point = 0;
value->quantization.channelwise_scale = scale;
value->quantization.channel_dimension = channel_dim;
value->shape.num_dims = num_dims;
memcpy(value->shape.dim, dims, num_dims * sizeof(size_t));
value->size = xnn_tensor_get_size_by_id(subgraph, value->id);
value->flags = flags;
value->data = (void*) (uintptr_t) data;
set_allocation_type(value);
*id_out = value->id;
return xnn_status_success;
}
size_t xnn_shape_multiply_all_dims(
const struct xnn_shape shape[restrict XNN_MIN_ELEMENTS(1)])
{
size_t batch_size = 1;
for (size_t i = 0; i < shape->num_dims; i++) {
batch_size *= shape->dim[i];
}
return batch_size;
}
size_t xnn_shape_multiply_batch_dims(
const struct xnn_shape shape[restrict XNN_MIN_ELEMENTS(1)],
size_t num_nonbatch_dims)
{
size_t batch_size = 1;
for (size_t i = 0; i + num_nonbatch_dims < shape->num_dims; i++) {
batch_size *= shape->dim[i];
}
return batch_size;
}
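// Example for xnn_shape_multiply_batch_dims above (added for clarity): for a
// shape of {8, 16, 32} and num_nonbatch_dims == 1 the product is 8 * 16 = 128;
// with num_nonbatch_dims == 2 only the leading dimension remains, giving 8.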
size_t xnn_shape_multiply_non_channel_dims(
const struct xnn_shape shape[restrict XNN_MIN_ELEMENTS(1)])
{
size_t batch_size = 1;
for (size_t i = 0; i + 1 < shape->num_dims; i++) {
batch_size *= shape->dim[i];
}
return batch_size;
}
size_t xnn_tensor_get_size(const struct xnn_value* value)
{
assert(value->type == xnn_value_type_dense_tensor);
assert(value->datatype != xnn_datatype_invalid);
size_t size = 0;
switch (value->datatype) {
case xnn_datatype_fp16:
size = 2;
break;
case xnn_datatype_fp32:
size = 4;
break;
case xnn_datatype_qint8:
case xnn_datatype_quint8:
case xnn_datatype_qcint8:
size = 1;
break;
case xnn_datatype_qint32:
case xnn_datatype_qcint32:
size = 4;
break;
case xnn_datatype_invalid:
XNN_UNREACHABLE;
}
return size * xnn_shape_multiply_all_dims(&value->shape);
}
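// Example for xnn_tensor_get_size above (added for clarity): an
// xnn_datatype_fp32 tensor with shape {2, 3, 4} occupies 4 * (2 * 3 * 4) = 96
// bytes; the same shape in xnn_datatype_fp16 occupies 48 bytes.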
size_t xnn_tensor_get_size_by_id(xnn_subgraph_t subgraph, uint32_t value_id)
{
assert(value_id < subgraph->num_values);
const struct xnn_value* value = subgraph->values + value_id;
return xnn_tensor_get_size(value);
}
| 10,805 | 30.876106 | 119 | c |
| XNNPACK | XNNPACK-master/src/transpose-config.c |
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <pthread.h>
#endif
#include <xnnpack/common.h>
#include <xnnpack/config.h>
#include <xnnpack/microparams-init.h>
#include <xnnpack/transpose.h>
#include <xnnpack/vunary.h>
static struct xnn_transpose_config transpose_config = {0};
#if XNN_PLATFORM_WINDOWS
static INIT_ONCE init_guard = INIT_ONCE_STATIC_INIT;
#else
static pthread_once_t init_guard = PTHREAD_ONCE_INIT;
#endif
static void init_transpose_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
transpose_config.copy = (xnn_vunary_ukernel_fn) xnn_xx_copy_ukernel__scalar_memcpy;
transpose_config.x8 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x8_transposec_ukernel__16x16_reuse_dec_zip_neon,
.tile_size = 32,
};
transpose_config.x16 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x16_transposec_ukernel__8x8_reuse_dec_zip_neon,
.tile_size = 32,
};
transpose_config.x24 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x24_transposec_ukernel__2x2_neon_tbl64,
.init.x24 = (xnn_init_x24_transpose_params_fn) xnn_init_x24_transpose_neon_tbl64_params,
.tile_size = 32,
};
transpose_config.x32 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x32_transposec_ukernel__4x4_reuse_dec_zip_neon,
.tile_size = 32,
};
transpose_config.xx = (struct xnn_transpose_subconfig) {
.variable_size_ukernel = xnn_xx_transposev_ukernel__1x1_scalar_memcpy,
.tile_size = 32,
};
} else if (!XNN_PLATFORM_MOBILE) {
transpose_config.copy = (xnn_vunary_ukernel_fn) xnn_xx_copy_ukernel__scalar_memcpy;
transpose_config.x8 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x8_transposec_ukernel__2x4_scalar_int,
.tile_size = 32,
};
transpose_config.x16 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x16_transposec_ukernel__2x4_scalar_int,
.tile_size = 32,
};
transpose_config.x24 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x24_transposec_ukernel__1x2_scalar,
.tile_size = 32,
};
transpose_config.x32 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x32_transposec_ukernel__2x4_scalar_int,
.tile_size = 32,
};
transpose_config.xx = (struct xnn_transpose_subconfig) {
.variable_size_ukernel = xnn_xx_transposev_ukernel__1x1_scalar_memcpy,
.tile_size = 32,
};
}
#elif XNN_ARCH_ARM64
transpose_config.copy = (xnn_vunary_ukernel_fn) xnn_xx_copy_ukernel__scalar_memcpy;
transpose_config.x8 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x8_transposec_ukernel__16x16_reuse_dec_zip_neon,
.tile_size = 32,
};
transpose_config.x16 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x16_transposec_ukernel__8x8_reuse_dec_zip_neon,
.tile_size = 32,
};
transpose_config.x24 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x24_transposec_ukernel__4x4_aarch64_neon_tbl128,
.init.x24 = (xnn_init_x24_transpose_params_fn) xnn_init_x24_transpose_neon_tbl128_params,
.tile_size = 32,
};
transpose_config.x32 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x32_transposec_ukernel__4x4_aarch64_neon_tbl128,
.tile_size = 32,
.init.x32 = (xnn_init_x32_transpose_params_fn) xnn_init_x32_transpose_neon_tbl128_params,
};
transpose_config.xx = (struct xnn_transpose_subconfig) {
.variable_size_ukernel = xnn_xx_transposev_ukernel__1x1_scalar_memcpy,
.tile_size = 32,
};
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
transpose_config.copy = (xnn_vunary_ukernel_fn) xnn_xx_copy_ukernel__scalar_memcpy;
transpose_config.xx = (struct xnn_transpose_subconfig) {
.variable_size_ukernel = xnn_xx_transposev_ukernel__1x1_scalar_memcpy,
.tile_size = 32,
};
transpose_config.x8 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x8_transposec_ukernel__16x16_reuse_mov_sse2,
.tile_size = 32,
};
transpose_config.x16 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x16_transposec_ukernel__8x8_reuse_multi_sse2,
.tile_size = 32,
};
transpose_config.x24 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x24_transposec_ukernel__1x2_scalar,
.tile_size = 32,
};
transpose_config.x32 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x32_transposec_ukernel__4x4_sse,
.tile_size = 32,
};
if (hardware_config->use_x86_ssse3) {
transpose_config.x24 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x24_transposec_ukernel__4x4_ssse3,
.init.x24 = (xnn_init_x24_transpose_params_fn) xnn_init_x24_transpose_ssse3_params,
.tile_size = 32,
};
}
if (hardware_config->use_x86_avx) {
transpose_config.x32 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x32_transposec_ukernel__8x8_reuse_multi_avx,
.init.x32 = (xnn_init_x32_transpose_params_fn) xnn_init_x32_transpose_avx_params,
.tile_size = 32,
};
}
if (hardware_config->use_x86_avx2) {
transpose_config.x8 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x8_transposec_ukernel__32x32_reuse_switch_avx2,
.init.x8 = (xnn_init_x8_transpose_params_fn) xnn_init_x8_transpose_avx2_params,
.tile_size = 32,
};
transpose_config.x16 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x16_transposec_ukernel__16x16_reuse_switch_avx2,
.init.x16 = (xnn_init_x16_transpose_params_fn) xnn_init_x16_transpose_avx2_params,
.tile_size = 32,
};
}
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
transpose_config.copy = (xnn_vunary_ukernel_fn) xnn_xx_copy_ukernel__scalar_memcpy;
transpose_config.x8 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x8_transposec_ukernel__16x16_reuse_mov_wasmsimd,
.tile_size = 32,
};
transpose_config.x16 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x16_transposec_ukernel__8x8_reuse_mov_wasmsimd,
.tile_size = 32,
};
transpose_config.x24 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x24_transposec_ukernel__1x2_scalar,
.tile_size = 32,
};
transpose_config.x32 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x32_transposec_ukernel__4x4_reuse_mov_wasmsimd,
.tile_size = 32,
};
transpose_config.xx = (struct xnn_transpose_subconfig) {
.variable_size_ukernel = xnn_xx_transposev_ukernel__1x1_scalar_memcpy,
.tile_size = 32,
};
#elif XNN_ARCH_WASM
transpose_config.copy = (xnn_vunary_ukernel_fn) xnn_xx_copy_ukernel__scalar_memcpy;
transpose_config.x8 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x8_transposec_ukernel__2x4_scalar_int,
.tile_size = 32,
};
transpose_config.x16 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x16_transposec_ukernel__2x4_scalar_int,
.tile_size = 32,
};
transpose_config.x24 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x24_transposec_ukernel__1x2_scalar,
.tile_size = 32,
};
transpose_config.x32 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x32_transposec_ukernel__2x4_scalar_int,
.tile_size = 32,
};
transpose_config.xx = (struct xnn_transpose_subconfig) {
.variable_size_ukernel = xnn_xx_transposev_ukernel__1x1_scalar_memcpy,
.tile_size = 32,
};
#elif XNN_ARCH_RISCV
transpose_config.copy = (xnn_vunary_ukernel_fn) xnn_xx_copy_ukernel__scalar_memcpy;
transpose_config.x8 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x8_transposec_ukernel__2x4_scalar_int,
.tile_size = 32,
};
transpose_config.x16 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x16_transposec_ukernel__2x4_scalar_int,
.tile_size = 32,
};
transpose_config.x24 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x24_transposec_ukernel__1x2_scalar,
.tile_size = 32,
};
transpose_config.x32 = (struct xnn_transpose_subconfig) {
.const_size_ukernel = (xnn_transposec_ukernel_fn) xnn_x32_transposec_ukernel__2x4_scalar_int,
.tile_size = 32,
};
transpose_config.xx = (struct xnn_transpose_subconfig) {
.variable_size_ukernel = xnn_xx_transposev_ukernel__1x1_scalar_memcpy,
.tile_size = 32,
};
#else
#error "Unsupported architecture"
#endif
}
#if XNN_PLATFORM_WINDOWS
static BOOL CALLBACK init_transpose_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_transpose_config();
return TRUE;
}
#endif
const struct xnn_transpose_config* xnn_init_transpose_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard, &init_transpose_config_windows, NULL, NULL);
#else
pthread_once(&init_guard, &init_transpose_config);
#endif
return &transpose_config;
}
| 10,779 | 42.821138 | 110 |
c
|
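The transpose config above follows the lazy, thread-safe initialization pattern used throughout these files: a static config struct, a once-guard (pthread_once on POSIX, InitOnceExecuteOnce on Windows), and a per-architecture block that fills in microkernel pointers and tile sizes. Below is a minimal, self-contained sketch of that pattern for the POSIX side only; the my_config struct, generic_kernel, and get_config names are illustrative stand-ins, not XNNPACK symbols.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical config struct standing in for xnn_transpose_config. */
struct my_config {
  void (*kernel)(void);
  size_t tile_size;
};

static struct my_config config = {0};
static pthread_once_t config_guard = PTHREAD_ONCE_INIT;

static void generic_kernel(void) { puts("generic kernel"); }

/* Runs exactly once, even if several threads race into the getter. */
static void init_config(void) {
  config.kernel = generic_kernel;
  config.tile_size = 32;
}

/* Callers always go through the getter, mirroring xnn_init_transpose_config(). */
const struct my_config* get_config(void) {
  pthread_once(&config_guard, &init_config);
  return &config;
}

int main(void) {
  const struct my_config* c = get_config();
  c->kernel();
  printf("tile size: %zu\n", c->tile_size);
  return 0;
}

On Windows the same effect is obtained with an INIT_ONCE guard and InitOnceExecuteOnce, exactly as in the file above.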
XNNPACK
|
XNNPACK-master/src/unpool-config.c
|
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <pthread.h>
#endif
#include <xnnpack/common.h>
#include <xnnpack/config.h>
#include <xnnpack/microparams-init.h>
#include <xnnpack/unpool.h>
static struct xnn_unpool_config x32_unpool_config = {0};
#if XNN_PLATFORM_WINDOWS
static INIT_ONCE init_guard_x32_unpool = INIT_ONCE_STATIC_INIT;
#else
static pthread_once_t init_guard_x32_unpool = PTHREAD_ONCE_INIT;
#endif
static void init_x32_unpool_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
x32_unpool_config.unpool = (xnn_unpool_ukernel_fn) xnn_x32_unpool_ukernel__neon;
} else if (!XNN_PLATFORM_MOBILE) {
x32_unpool_config.unpool = (xnn_unpool_ukernel_fn) xnn_x32_unpool_ukernel__scalar;
}
#elif XNN_ARCH_ARM64
x32_unpool_config.unpool = (xnn_unpool_ukernel_fn) xnn_x32_unpool_ukernel__neon;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
x32_unpool_config.unpool = (xnn_unpool_ukernel_fn) xnn_x32_unpool_ukernel__sse2;
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
x32_unpool_config.unpool = (xnn_unpool_ukernel_fn) xnn_x32_unpool_ukernel__wasmsimd;
#elif XNN_ARCH_WASM
x32_unpool_config.unpool = (xnn_unpool_ukernel_fn) xnn_x32_unpool_ukernel__scalar;
#elif XNN_ARCH_RISCV
x32_unpool_config.unpool = (xnn_unpool_ukernel_fn) xnn_x32_unpool_ukernel__scalar;
#endif
}
#if XNN_PLATFORM_WINDOWS
static BOOL CALLBACK init_x32_unpool_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_x32_unpool_config();
return TRUE;
}
#endif
const struct xnn_unpool_config* xnn_init_x32_unpool_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_x32_unpool, &init_x32_unpool_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_x32_unpool, &init_x32_unpool_config);
#endif
return &x32_unpool_config;
}
| 2,316 | 32.1 | 110 |
c
|
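For readers unfamiliar with the operator configured above: x32 max-unpooling scatters each pooled value back to the position recorded by the corresponding max-pooling index, leaving the rest of the output window untouched. The sketch below is a plain-C illustration of that idea only; the function name and signature are invented for the example and deliberately do not match the xnn_x32_unpool ukernel interface.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative max-unpool: for each channel, write the pooled value at the
 * offset remembered during pooling; the caller pre-initializes the output
 * (typically with zeros). Not the XNNPACK ukernel API. */
static void unpool_example(
    size_t channels,
    const uint32_t* value,   /* one pooled value per channel */
    const uint32_t* index,   /* offset (in elements) where the max came from */
    uint32_t* output,        /* output windows, channels * window_size elements */
    size_t window_size)
{
  for (size_t c = 0; c < channels; c++) {
    output[c * window_size + index[c]] = value[c];
  }
}

int main(void) {
  const uint32_t value[2] = {7, 9};
  const uint32_t index[2] = {2, 0};
  uint32_t output[2 * 4] = {0};
  unpool_example(2, value, index, output, 4);
  for (size_t i = 0; i < 8; i++) printf("%u ", output[i]);
  printf("\n");  /* expected: 0 0 7 0 9 0 0 0 */
  return 0;
}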
XNNPACK
|
XNNPACK-master/src/vmulcaddc-config.c
|
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <pthread.h>
#endif
#include <xnnpack/common.h>
#include <xnnpack/config.h>
#include <xnnpack/microparams-init.h>
#include <xnnpack/vmulcaddc.h>
static struct xnn_vmulcaddc_config f16_vmulcaddc_config = {0};
static struct xnn_vmulcaddc_config f32_vmulcaddc_config = {0};
#if XNN_PLATFORM_WINDOWS
static INIT_ONCE init_guard_f16_vmulcaddc = INIT_ONCE_STATIC_INIT;
static INIT_ONCE init_guard_f32_vmulcaddc = INIT_ONCE_STATIC_INIT;
#else
static pthread_once_t init_guard_f16_vmulcaddc = PTHREAD_ONCE_INIT;
static pthread_once_t init_guard_f32_vmulcaddc = PTHREAD_ONCE_INIT;
#endif
static void init_f16_vmulcaddc_config(void) {
#if XNN_ARCH_ARM && XNN_ENABLE_ARM_FP16_VECTOR && XNN_ENABLE_ARM_FP16_SCALAR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon_fp16_arith) {
f16_vmulcaddc_config.ukernel = (xnn_vmulcaddc_ukernel_fn) xnn_f16_vmulcaddc_minmax_ukernel_c8__neonfp16arith_2x;
f16_vmulcaddc_config.init.f16 = xnn_init_f16_minmax_fp16arith_params;
f16_vmulcaddc_config.channel_tile = 8;
f16_vmulcaddc_config.row_tile = 2;
}
#elif XNN_ARCH_ARM64 && XNN_ENABLE_ARM_FP16_VECTOR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon_fp16_arith) {
f16_vmulcaddc_config.ukernel = (xnn_vmulcaddc_ukernel_fn) xnn_f16_vmulcaddc_minmax_ukernel_c8__neonfp16arith_2x;
f16_vmulcaddc_config.init.f16 = xnn_init_f16_minmax_fp16arith_params;
f16_vmulcaddc_config.channel_tile = 8;
f16_vmulcaddc_config.row_tile = 2;
}
#elif (XNN_ARCH_X86 || XNN_ARCH_X86_64) && !XNN_PLATFORM_MOBILE
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_x86_avx2) {
f16_vmulcaddc_config.ukernel = (xnn_vmulcaddc_ukernel_fn) xnn_f16_vmulcaddc_minmax_ukernel_c8__fma3_2x;
f16_vmulcaddc_config.init.f16 = xnn_init_f16_minmax_avx_params;
f16_vmulcaddc_config.channel_tile = 8;
f16_vmulcaddc_config.row_tile = 2;
}
#endif
}
static void init_f32_vmulcaddc_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
f32_vmulcaddc_config.ukernel = (xnn_vmulcaddc_ukernel_fn) xnn_f32_vmulcaddc_minmax_ukernel_c4__neon_2x;
f32_vmulcaddc_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_vmulcaddc_config.channel_tile = 4;
f32_vmulcaddc_config.row_tile = 2;
} else if (!XNN_PLATFORM_MOBILE) {
f32_vmulcaddc_config.ukernel = (xnn_vmulcaddc_ukernel_fn) xnn_f32_vmulcaddc_minmax_ukernel_c1__scalar_2x;
f32_vmulcaddc_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_vmulcaddc_config.channel_tile = 1;
f32_vmulcaddc_config.row_tile = 2;
}
#elif XNN_ARCH_ARM64
f32_vmulcaddc_config.ukernel = (xnn_vmulcaddc_ukernel_fn) xnn_f32_vmulcaddc_minmax_ukernel_c4__neonfma_2x;
f32_vmulcaddc_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_vmulcaddc_config.channel_tile = 4;
f32_vmulcaddc_config.row_tile = 2;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
f32_vmulcaddc_config.ukernel = (xnn_vmulcaddc_ukernel_fn) xnn_f32_vmulcaddc_minmax_ukernel_c4__sse_2x;
f32_vmulcaddc_config.init.f32 = xnn_init_f32_minmax_sse_params;
f32_vmulcaddc_config.channel_tile = 4;
f32_vmulcaddc_config.row_tile = 2;
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
#if XNN_ARCH_WASMRELAXEDSIMD
f32_vmulcaddc_config.ukernel = (xnn_vmulcaddc_ukernel_fn) xnn_f32_vmulcaddc_minmax_ukernel_c4__wasmrelaxedsimd_fma_2x;
f32_vmulcaddc_config.init.f32 = xnn_init_f32_minmax_wasmsimd_params;
f32_vmulcaddc_config.channel_tile = 4;
f32_vmulcaddc_config.row_tile = 2;
#else
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->is_x86) {
f32_vmulcaddc_config.ukernel = (xnn_vmulcaddc_ukernel_fn) xnn_f32_vmulcaddc_minmax_ukernel_c4__wasmsimd_x86_2x;
f32_vmulcaddc_config.init.f32 = xnn_init_f32_minmax_wasmsimd_params;
f32_vmulcaddc_config.channel_tile = 4;
f32_vmulcaddc_config.row_tile = 2;
} else {
f32_vmulcaddc_config.ukernel = (xnn_vmulcaddc_ukernel_fn) xnn_f32_vmulcaddc_minmax_ukernel_c4__wasmsimd_arm_2x;
f32_vmulcaddc_config.init.f32 = xnn_init_f32_minmax_wasmsimd_params;
f32_vmulcaddc_config.channel_tile = 4;
f32_vmulcaddc_config.row_tile = 2;
}
#endif
#elif XNN_ARCH_WASM
f32_vmulcaddc_config.ukernel = (xnn_vmulcaddc_ukernel_fn) xnn_f32_vmulcaddc_minmax_ukernel_c1__wasm_2x;
f32_vmulcaddc_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_vmulcaddc_config.channel_tile = 1;
f32_vmulcaddc_config.row_tile = 2;
#elif XNN_ARCH_RISCV
f32_vmulcaddc_config.ukernel = (xnn_vmulcaddc_ukernel_fn) xnn_f32_vmulcaddc_minmax_ukernel_c1__scalar_2x;
f32_vmulcaddc_config.init.f32 = xnn_init_f32_minmax_scalar_params;
f32_vmulcaddc_config.channel_tile = 1;
f32_vmulcaddc_config.row_tile = 2;
#endif
}
#if XNN_PLATFORM_WINDOWS
static BOOL CALLBACK init_f16_vmulcaddc_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f16_vmulcaddc_config();
return TRUE;
}
static BOOL CALLBACK init_f32_vmulcaddc_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_f32_vmulcaddc_config();
return TRUE;
}
#endif
const struct xnn_vmulcaddc_config* xnn_init_f16_vmulcaddc_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL || !xnn_is_f16_compatible_config(hardware_config)) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f16_vmulcaddc, &init_f16_vmulcaddc_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f16_vmulcaddc, &init_f16_vmulcaddc_config);
#endif
return &f16_vmulcaddc_config;
}
const struct xnn_vmulcaddc_config* xnn_init_f32_vmulcaddc_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_f32_vmulcaddc, &init_f32_vmulcaddc_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_f32_vmulcaddc, &init_f32_vmulcaddc_config);
#endif
return &f32_vmulcaddc_config;
}
| 6,924 | 42.553459 | 124 |
c
|
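The vmulcaddc kernels selected above apply a per-channel scale and per-channel bias to every row of a 2D tile and clamp the result to a min/max range; channel_tile and row_tile describe how many channels and rows one microkernel call covers. A scalar reference of the computation is sketched below, assuming row-major data and separate scale/bias arrays; the names are illustrative and the real microkernels take a packed weights pointer and a params struct instead of bare min/max floats.

#include <stddef.h>
#include <stdio.h>

/* Reference: y[r][c] = clamp(x[r][c] * scale[c] + bias[c], y_min, y_max). */
static void vmulcaddc_ref(
    size_t rows, size_t channels,
    const float* x, const float* scale, const float* bias,
    float* y, float y_min, float y_max)
{
  for (size_t r = 0; r < rows; r++) {
    for (size_t c = 0; c < channels; c++) {
      float v = x[r * channels + c] * scale[c] + bias[c];
      if (v < y_min) v = y_min;
      if (v > y_max) v = y_max;
      y[r * channels + c] = v;
    }
  }
}

int main(void) {
  const float x[2 * 3] = {1, 2, 3, 4, 5, 6};
  const float scale[3] = {2.0f, 0.5f, 1.0f};
  const float bias[3] = {0.0f, 1.0f, -1.0f};
  float y[2 * 3];
  vmulcaddc_ref(2, 3, x, scale, bias, y, 0.0f, 10.0f);
  for (size_t i = 0; i < 6; i++) printf("%g ", y[i]);  /* 2 2 2 8 3.5 5 */
  printf("\n");
  return 0;
}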
XNNPACK
|
XNNPACK-master/src/x8-lut-config.c
|
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <pthread.h>
#endif
#include <xnnpack/common.h>
#include <xnnpack/config.h>
#include <xnnpack/lut.h>
static struct xnn_x8_lut_config x8_lut_config = {0};
#if XNN_PLATFORM_WINDOWS
static INIT_ONCE init_guard = INIT_ONCE_STATIC_INIT;
#else
static pthread_once_t init_guard = PTHREAD_ONCE_INIT;
#endif
static void init_x8_lut_config(void) {
#if XNN_ARCH_ARM
x8_lut_config.microkernel = xnn_x8_lut_ukernel__scalar_x4;
x8_lut_config.tile_size = 4;
#elif XNN_ARCH_ARM64
x8_lut_config.microkernel = xnn_x8_lut_ukernel__aarch64_neon_tbx128x4_x64;
x8_lut_config.tile_size = 64;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (!XNN_PLATFORM_MOBILE && hardware_config->use_x86_avx512skx) {
if (hardware_config->use_x86_avx512vbmi) {
x8_lut_config.microkernel = xnn_x8_lut_ukernel__avx512vbmi_vpermx2b_x128;
x8_lut_config.tile_size = 128;
} else {
x8_lut_config.microkernel = xnn_x8_lut_ukernel__avx512skx_vpshufb_x64;
x8_lut_config.tile_size = 64;
}
} else if (hardware_config->use_x86_avx2) {
x8_lut_config.microkernel = xnn_x8_lut_ukernel__avx2_x128;
x8_lut_config.tile_size = 128;
} else if (hardware_config->use_x86_avx) {
x8_lut_config.microkernel = xnn_x8_lut_ukernel__avx_x64;
x8_lut_config.tile_size = 64;
} else {
// Note: SSSE3 version is usually slower than scalar
x8_lut_config.microkernel = xnn_x8_lut_ukernel__scalar_x4;
x8_lut_config.tile_size = 4;
}
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->is_x86) {
#if XNN_ARCH_WASMRELAXEDSIMD
if (hardware_config->use_wasm_pshufb) {
x8_lut_config.microkernel = xnn_x8_lut_ukernel__wasmpshufb_x32;
x8_lut_config.tile_size = 32;
} else {
x8_lut_config.microkernel = xnn_x8_lut_ukernel__scalar_x1;
x8_lut_config.tile_size = 1;
}
#else
x8_lut_config.microkernel = xnn_x8_lut_ukernel__scalar_x1;
x8_lut_config.tile_size = 1;
#endif
} else {
x8_lut_config.microkernel = xnn_x8_lut_ukernel__wasmsimd_x32;
x8_lut_config.tile_size = 32;
}
#elif XNN_ARCH_WASM
x8_lut_config.microkernel = xnn_x8_lut_ukernel__scalar_x1;
x8_lut_config.tile_size = 1;
#elif XNN_ARCH_RISCV
x8_lut_config.microkernel = xnn_x8_lut_ukernel__scalar_x4;
x8_lut_config.tile_size = 4;
#else
#error "Unsupported architecture"
#endif
}
#if XNN_PLATFORM_WINDOWS
static BOOL CALLBACK init_x8_lut_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_x8_lut_config();
return TRUE;
}
#endif
const struct xnn_x8_lut_config* xnn_init_x8_lut_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard, &init_x8_lut_config_windows, NULL, NULL);
#else
pthread_once(&init_guard, &init_x8_lut_config);
#endif
return &x8_lut_config;
}
| 3,547 | 31.550459 | 106 |
c
|
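Every x8 LUT variant chosen above computes the same thing, an elementwise 256-entry byte table lookup; tile_size only records how many elements the selected kernel prefers to handle per iteration. The scalar reference below matches the behaviour (though not necessarily the exact calling convention) of those kernels.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Reference byte LUT: output[i] = table[input[i]]. */
static void x8_lut_ref(size_t batch, const uint8_t* input, uint8_t* output,
                       const uint8_t table[256])
{
  for (size_t i = 0; i < batch; i++) {
    output[i] = table[input[i]];
  }
}

int main(void) {
  uint8_t table[256];
  for (int i = 0; i < 256; i++) table[i] = (uint8_t) (255 - i);  /* bitwise NOT via LUT */
  const uint8_t input[4] = {0, 1, 254, 255};
  uint8_t output[4];
  x8_lut_ref(4, input, output, table);
  for (int i = 0; i < 4; i++) printf("%u ", output[i]);
  printf("\n");  /* expected: 255 254 1 0 */
  return 0;
}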
XNNPACK
|
XNNPACK-master/src/xx-fill-config.c
|
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <pthread.h>
#endif
#include <xnnpack/common.h>
#include <xnnpack/config.h>
#include <xnnpack/fill.h>
static struct xnn_xx_fill_config xx_fill_config = {0};
#if XNN_PLATFORM_WINDOWS
static INIT_ONCE init_guard = INIT_ONCE_STATIC_INIT;
#else
static pthread_once_t init_guard = PTHREAD_ONCE_INIT;
#endif
static void init_xx_fill_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
xx_fill_config.ukernel = (xnn_fill_ukernel_fn) xnn_xx_fill_ukernel__neon_x64;
xx_fill_config.row_tile = 1;
} else if (!XNN_PLATFORM_MOBILE) {
xx_fill_config.ukernel = (xnn_fill_ukernel_fn) xnn_xx_fill_ukernel__scalar_x16;
xx_fill_config.row_tile = 1;
}
#elif XNN_ARCH_ARM64
xx_fill_config.ukernel = (xnn_fill_ukernel_fn) xnn_xx_fill_ukernel__neon_x64;
xx_fill_config.row_tile = 1;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
xx_fill_config.ukernel = (xnn_fill_ukernel_fn) xnn_xx_fill_ukernel__sse2_x64;
xx_fill_config.row_tile = 1;
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
xx_fill_config.ukernel = (xnn_fill_ukernel_fn) xnn_xx_fill_ukernel__wasmsimd_x64;
xx_fill_config.row_tile = 1;
#elif XNN_ARCH_WASM
xx_fill_config.ukernel = (xnn_fill_ukernel_fn) xnn_xx_fill_ukernel__scalar_x16;
xx_fill_config.row_tile = 1;
#elif XNN_ARCH_RISCV
xx_fill_config.ukernel = (xnn_fill_ukernel_fn) xnn_xx_fill_ukernel__scalar_x16;
xx_fill_config.row_tile = 1;
#else
#error "Unsupported architecture"
#endif
}
#if XNN_PLATFORM_WINDOWS
static BOOL CALLBACK init_xx_fill_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_xx_fill_config();
return TRUE;
}
#endif
const struct xnn_xx_fill_config* xnn_init_xx_fill_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard, &init_xx_fill_config_windows, NULL, NULL);
#else
pthread_once(&init_guard, &init_xx_fill_config);
#endif
return &xx_fill_config;
}
| 2,470 | 30.679487 | 107 |
c
|
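The xx-fill kernels above write a repeated fill pattern across every row of a strided output region; the architecture only changes how wide each store is. A minimal scalar sketch of that contract is below, assuming (as the operator-level code uses it) a 32-bit pattern repeated along each row; the signature is invented for the example.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative fill: write `bytes` bytes of a repeated 4-byte pattern into
 * each of `rows` rows, where consecutive rows are `stride` bytes apart. */
static void fill_rows_example(size_t rows, size_t bytes, void* output,
                              size_t stride, uint32_t pattern)
{
  for (size_t r = 0; r < rows; r++) {
    uint8_t* row = (uint8_t*) output + r * stride;
    size_t b = bytes;
    while (b >= sizeof(pattern)) {
      memcpy(row, &pattern, sizeof(pattern));
      row += sizeof(pattern);
      b -= sizeof(pattern);
    }
    if (b != 0) {
      memcpy(row, &pattern, b);  /* tail: leading bytes of the pattern */
    }
  }
}

int main(void) {
  uint8_t buf[2][8];
  memset(buf, 0, sizeof(buf));
  fill_rows_example(2, 6, buf, 8, 0xAABBCCDDu);
  for (int r = 0; r < 2; r++) {
    for (int c = 0; c < 8; c++) printf("%02X ", buf[r][c]);
    printf("\n");
  }
  return 0;
}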
XNNPACK
|
XNNPACK-master/src/xx-pad-config.c
|
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <pthread.h>
#endif
#include <xnnpack/common.h>
#include <xnnpack/config.h>
#include <xnnpack/pad.h>
static struct xnn_xx_pad_config xx_pad_config = {0};
#if XNN_PLATFORM_WINDOWS
static INIT_ONCE init_guard = INIT_ONCE_STATIC_INIT;
#else
static pthread_once_t init_guard = PTHREAD_ONCE_INIT;
#endif
static void init_xx_pad_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
xx_pad_config.ukernel = (xnn_pad_ukernel_fn) xnn_xx_pad_ukernel__neon;
xx_pad_config.row_tile = 1;
} else if (!XNN_PLATFORM_MOBILE) {
xx_pad_config.ukernel = (xnn_pad_ukernel_fn) xnn_xx_pad_ukernel__scalar;
xx_pad_config.row_tile = 1;
}
#elif XNN_ARCH_ARM64
xx_pad_config.ukernel = (xnn_pad_ukernel_fn) xnn_xx_pad_ukernel__neon;
xx_pad_config.row_tile = 1;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
xx_pad_config.ukernel = (xnn_pad_ukernel_fn) xnn_xx_pad_ukernel__sse2;
xx_pad_config.row_tile = 1;
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
xx_pad_config.ukernel = (xnn_pad_ukernel_fn) xnn_xx_pad_ukernel__wasmsimd;
xx_pad_config.row_tile = 1;
#elif XNN_ARCH_WASM
xx_pad_config.ukernel = (xnn_pad_ukernel_fn) xnn_xx_pad_ukernel__scalar;
xx_pad_config.row_tile = 1;
#elif XNN_ARCH_RISCV
xx_pad_config.ukernel = (xnn_pad_ukernel_fn) xnn_xx_pad_ukernel__scalar;
xx_pad_config.row_tile = 1;
#else
#error "Unsupported architecture"
#endif
}
#if XNN_PLATFORM_WINDOWS
static BOOL CALLBACK init_xx_pad_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_xx_pad_config();
return TRUE;
}
#endif
const struct xnn_xx_pad_config* xnn_init_xx_pad_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard, &init_xx_pad_config_windows, NULL, NULL);
#else
pthread_once(&init_guard, &init_xx_pad_config);
#endif
return &xx_pad_config;
}
| 2,403 | 29.820513 | 106 |
c
|
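The xx-pad kernels above copy each input row into a wider output row, writing a fill value before and after the copied data. The sketch below shows the idea for a single row with an invented signature; the real ukernel additionally handles multiple rows with separate input and output strides.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative row padding: out = [pre x fill_byte][input][post x fill_byte]. */
static void pad_row_example(const uint8_t* input, size_t input_bytes,
                            uint8_t* output, size_t pre, size_t post,
                            uint8_t fill_byte)
{
  memset(output, fill_byte, pre);
  memcpy(output + pre, input, input_bytes);
  memset(output + pre + input_bytes, fill_byte, post);
}

int main(void) {
  const uint8_t in[3] = {1, 2, 3};
  uint8_t out[7];
  pad_row_example(in, sizeof(in), out, 2, 2, 0);
  for (size_t i = 0; i < sizeof(out); i++) printf("%u ", out[i]);
  printf("\n");  /* expected: 0 0 1 2 3 0 0 */
  return 0;
}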
XNNPACK
|
XNNPACK-master/src/zip-config.c
|
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <pthread.h>
#endif
#include <xnnpack/common.h>
#include <xnnpack/config.h>
#include <xnnpack/microparams-init.h>
#include <xnnpack/zip.h>
static struct xnn_zip_config x8_zip_config = {0};
static struct xnn_zip_config x32_zip_config = {0};
#if XNN_PLATFORM_WINDOWS
static INIT_ONCE init_guard_x8_zip = INIT_ONCE_STATIC_INIT;
static INIT_ONCE init_guard_x32_zip = INIT_ONCE_STATIC_INIT;
#else
static pthread_once_t init_guard_x8_zip = PTHREAD_ONCE_INIT;
static pthread_once_t init_guard_x32_zip = PTHREAD_ONCE_INIT;
#endif
static void init_x8_zip_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
x8_zip_config.x2 = (xnn_zipc_ukernel_fn) xnn_x8_zip_x2_ukernel__neon;
x8_zip_config.x3 = (xnn_zipc_ukernel_fn) xnn_x8_zip_x3_ukernel__neon;
x8_zip_config.x4 = (xnn_zipc_ukernel_fn) xnn_x8_zip_x4_ukernel__neon;
x8_zip_config.xm = (xnn_zipv_ukernel_fn) xnn_x8_zip_xm_ukernel__neon;
} else if (!XNN_PLATFORM_MOBILE) {
x8_zip_config.x2 = (xnn_zipc_ukernel_fn) xnn_x8_zip_x2_ukernel__scalar;
x8_zip_config.x3 = (xnn_zipc_ukernel_fn) xnn_x8_zip_x3_ukernel__scalar;
x8_zip_config.x4 = (xnn_zipc_ukernel_fn) xnn_x8_zip_x4_ukernel__scalar;
x8_zip_config.xm = (xnn_zipv_ukernel_fn) xnn_x8_zip_xm_ukernel__scalar;
}
#elif XNN_ARCH_ARM64
x8_zip_config.x2 = (xnn_zipc_ukernel_fn) xnn_x8_zip_x2_ukernel__neon;
x8_zip_config.x3 = (xnn_zipc_ukernel_fn) xnn_x8_zip_x3_ukernel__neon;
x8_zip_config.x4 = (xnn_zipc_ukernel_fn) xnn_x8_zip_x4_ukernel__neon;
x8_zip_config.xm = (xnn_zipv_ukernel_fn) xnn_x8_zip_xm_ukernel__neon;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
x8_zip_config.x2 = (xnn_zipc_ukernel_fn) xnn_x8_zip_x2_ukernel__sse2;
x8_zip_config.x3 = (xnn_zipc_ukernel_fn) xnn_x8_zip_x3_ukernel__sse2;
x8_zip_config.x4 = (xnn_zipc_ukernel_fn) xnn_x8_zip_x4_ukernel__sse2;
x8_zip_config.xm = (xnn_zipv_ukernel_fn) xnn_x8_zip_xm_ukernel__sse2;
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
x8_zip_config.x2 = (xnn_zipc_ukernel_fn) xnn_x8_zip_x2_ukernel__scalar;
x8_zip_config.x3 = (xnn_zipc_ukernel_fn) xnn_x8_zip_x3_ukernel__scalar;
x8_zip_config.x4 = (xnn_zipc_ukernel_fn) xnn_x8_zip_x4_ukernel__scalar;
x8_zip_config.xm = (xnn_zipv_ukernel_fn) xnn_x8_zip_xm_ukernel__scalar;
#elif XNN_ARCH_WASM
x8_zip_config.x2 = (xnn_zipc_ukernel_fn) xnn_x8_zip_x2_ukernel__scalar;
x8_zip_config.x3 = (xnn_zipc_ukernel_fn) xnn_x8_zip_x3_ukernel__scalar;
x8_zip_config.x4 = (xnn_zipc_ukernel_fn) xnn_x8_zip_x4_ukernel__scalar;
x8_zip_config.xm = (xnn_zipv_ukernel_fn) xnn_x8_zip_xm_ukernel__scalar;
#elif XNN_ARCH_RISCV
x8_zip_config.x2 = (xnn_zipc_ukernel_fn) xnn_x8_zip_x2_ukernel__scalar;
x8_zip_config.x3 = (xnn_zipc_ukernel_fn) xnn_x8_zip_x3_ukernel__scalar;
x8_zip_config.x4 = (xnn_zipc_ukernel_fn) xnn_x8_zip_x4_ukernel__scalar;
x8_zip_config.xm = (xnn_zipv_ukernel_fn) xnn_x8_zip_xm_ukernel__scalar;
#endif
}
static void init_x32_zip_config(void) {
#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
if (hardware_config->use_arm_neon) {
x32_zip_config.x2 = (xnn_zipc_ukernel_fn) xnn_x32_zip_x2_ukernel__neon;
x32_zip_config.x3 = (xnn_zipc_ukernel_fn) xnn_x32_zip_x3_ukernel__neon;
x32_zip_config.x4 = (xnn_zipc_ukernel_fn) xnn_x32_zip_x4_ukernel__neon;
x32_zip_config.xm = (xnn_zipv_ukernel_fn) xnn_x32_zip_xm_ukernel__neon;
} else if (!XNN_PLATFORM_MOBILE) {
x32_zip_config.x2 = (xnn_zipc_ukernel_fn) xnn_x32_zip_x2_ukernel__scalar;
x32_zip_config.x3 = (xnn_zipc_ukernel_fn) xnn_x32_zip_x3_ukernel__scalar;
x32_zip_config.x4 = (xnn_zipc_ukernel_fn) xnn_x32_zip_x4_ukernel__scalar;
x32_zip_config.xm = (xnn_zipv_ukernel_fn) xnn_x32_zip_xm_ukernel__scalar;
}
#elif XNN_ARCH_ARM64
x32_zip_config.x2 = (xnn_zipc_ukernel_fn) xnn_x32_zip_x2_ukernel__neon;
x32_zip_config.x3 = (xnn_zipc_ukernel_fn) xnn_x32_zip_x3_ukernel__neon;
x32_zip_config.x4 = (xnn_zipc_ukernel_fn) xnn_x32_zip_x4_ukernel__neon;
x32_zip_config.xm = (xnn_zipv_ukernel_fn) xnn_x32_zip_xm_ukernel__neon;
#elif XNN_ARCH_X86 || XNN_ARCH_X86_64
x32_zip_config.x2 = (xnn_zipc_ukernel_fn) xnn_x32_zip_x2_ukernel__sse2;
x32_zip_config.x3 = (xnn_zipc_ukernel_fn) xnn_x32_zip_x3_ukernel__sse2;
x32_zip_config.x4 = (xnn_zipc_ukernel_fn) xnn_x32_zip_x4_ukernel__sse2;
x32_zip_config.xm = (xnn_zipv_ukernel_fn) xnn_x32_zip_xm_ukernel__sse2;
#elif XNN_ARCH_WASMSIMD || XNN_ARCH_WASMRELAXEDSIMD
x32_zip_config.x2 = (xnn_zipc_ukernel_fn) xnn_x32_zip_x2_ukernel__wasmsimd;
x32_zip_config.x3 = (xnn_zipc_ukernel_fn) xnn_x32_zip_x3_ukernel__wasmsimd;
x32_zip_config.x4 = (xnn_zipc_ukernel_fn) xnn_x32_zip_x4_ukernel__wasmsimd;
x32_zip_config.xm = (xnn_zipv_ukernel_fn) xnn_x32_zip_xm_ukernel__wasmsimd;
#elif XNN_ARCH_WASM
x32_zip_config.x2 = (xnn_zipc_ukernel_fn) xnn_x32_zip_x2_ukernel__scalar;
x32_zip_config.x3 = (xnn_zipc_ukernel_fn) xnn_x32_zip_x3_ukernel__scalar;
x32_zip_config.x4 = (xnn_zipc_ukernel_fn) xnn_x32_zip_x4_ukernel__scalar;
x32_zip_config.xm = (xnn_zipv_ukernel_fn) xnn_x32_zip_xm_ukernel__scalar;
#elif XNN_ARCH_RISCV
x32_zip_config.x2 = (xnn_zipc_ukernel_fn) xnn_x32_zip_x2_ukernel__scalar;
x32_zip_config.x3 = (xnn_zipc_ukernel_fn) xnn_x32_zip_x3_ukernel__scalar;
x32_zip_config.x4 = (xnn_zipc_ukernel_fn) xnn_x32_zip_x4_ukernel__scalar;
x32_zip_config.xm = (xnn_zipv_ukernel_fn) xnn_x32_zip_xm_ukernel__scalar;
#endif
}
#if XNN_PLATFORM_WINDOWS
static BOOL CALLBACK init_x8_zip_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_x8_zip_config();
return TRUE;
}
static BOOL CALLBACK init_x32_zip_config_windows(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
init_x32_zip_config();
return TRUE;
}
#endif
const struct xnn_zip_config* xnn_init_x8_zip_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_x8_zip, &init_x8_zip_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_x8_zip, &init_x8_zip_config);
#endif
return &x8_zip_config;
}
const struct xnn_zip_config* xnn_init_x32_zip_config() {
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
if (hardware_config == NULL) {
return NULL;
}
#if XNN_PLATFORM_WINDOWS
InitOnceExecuteOnce(&init_guard_x32_zip, &init_x32_zip_config_windows, NULL, NULL);
#else
pthread_once(&init_guard_x32_zip, &init_x32_zip_config);
#endif
return &x32_zip_config;
}
| 7,144 | 45.096774 | 107 |
c
|
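The zip microkernels configured above interleave 2, 3, 4, or a variable number of equally sized input streams into one output stream (channel interleaving). A scalar x2 reference for 32-bit elements is shown below; the name and signature are for illustration only and differ from the xnn_zipc ukernel convention.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Reference x2 zip: out = {a[0], b[0], a[1], b[1], ...}. */
static void x32_zip_x2_ref(size_t n, const uint32_t* a, const uint32_t* b,
                           uint32_t* out)
{
  for (size_t i = 0; i < n; i++) {
    out[2 * i + 0] = a[i];
    out[2 * i + 1] = b[i];
  }
}

int main(void) {
  const uint32_t a[3] = {1, 3, 5};
  const uint32_t b[3] = {2, 4, 6};
  uint32_t out[6];
  x32_zip_x2_ref(3, a, b, out);
  for (size_t i = 0; i < 6; i++) printf("%u ", out[i]);
  printf("\n");  /* expected: 1 2 3 4 5 6 */
  return 0;
}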
XNNPACK
|
XNNPACK-master/src/amalgam/gen/avx512vbmi.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
void xnn_x8_lut_ukernel__avx512vbmi_vpermx2b_x128(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vtable0 = _mm512_loadu_si512(table);
const __m512i vtable1 = _mm512_loadu_si512(table + 64);
const __m512i vtable2 = _mm512_loadu_si512(table + 128);
const __m512i vtable3 = _mm512_loadu_si512(table + 192);
for (; batch >= 128 * sizeof(uint8_t); batch -= 128 * sizeof(uint8_t)) {
const __m512i vx0 = _mm512_loadu_si512(input);
const __m512i vx1 = _mm512_loadu_si512(input + 64);
input += 128;
__m512i vy0 = _mm512_permutex2var_epi8(vtable0, vx0, vtable1);
const __mmask64 vm0 = _mm512_movepi8_mask(vx0);
__m512i vy1 = _mm512_permutex2var_epi8(vtable0, vx1, vtable1);
const __mmask64 vm1 = _mm512_movepi8_mask(vx1);
const __m512i vt0 = _mm512_permutex2var_epi8(vtable2, vx0, vtable3);
const __m512i vt1 = _mm512_permutex2var_epi8(vtable2, vx1, vtable3);
vy0 = _mm512_mask_mov_epi8(vy0, vm0, vt0);
vy1 = _mm512_mask_mov_epi8(vy1, vm1, vt1);
_mm512_storeu_si512(output, vy0);
_mm512_storeu_si512(output + 64, vy1);
output += 128;
}
for (; batch >= 64 * sizeof(uint8_t); batch -= 64 * sizeof(uint8_t)) {
__m512i vx = _mm512_loadu_si512(input);
input += 64;
__m512i vy = _mm512_permutex2var_epi8(vtable0, vx, vtable1);
const __mmask64 vm = _mm512_movepi8_mask(vx);
const __m512i vt = _mm512_permutex2var_epi8(vtable2, vx, vtable3);
vy = _mm512_mask_mov_epi8(vy, vm, vt);
_mm512_storeu_si512(output, vy);
output += 64;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch < 64);
const __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT64_C(1) << batch) - UINT64_C(1)));
__m512i vx = _mm512_maskz_loadu_epi8(vmask, input);
__m512i vy = _mm512_maskz_permutex2var_epi8(vmask, vtable0, vx, vtable1);
const __mmask64 vm = _mm512_movepi8_mask(vx);
const __m512i vt = _mm512_maskz_permutex2var_epi8(vmask, vtable2, vx, vtable3);
vy = _mm512_mask_mov_epi8(vy, vm, vt);
_mm512_mask_storeu_epi8(output, vmask, vy);
}
}
| 2,554 | 32.181818 | 94 |
c
|
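The AVX512-VBMI kernel above covers a full 256-entry byte table with two permutes per input vector: _mm512_permutex2var_epi8 indexes a 128-byte concatenation of two table registers using the low 7 bits of each index byte, and the index's top bit (extracted with _mm512_movepi8_mask) then selects between the result for table bytes 0..127 and the result for bytes 128..255. The scalar model below reproduces just that selection logic, assuming the same split of the table into two 128-byte halves.

#include <stdint.h>
#include <stdio.h>

/* Scalar model of the two-permute trick: one lookup in table[0..127], one in
 * table[128..255], then the index's top bit picks which result is kept. */
static uint8_t lut256_via_two_halves(uint8_t index, const uint8_t table[256])
{
  const uint8_t lo = table[index & 0x7F];          /* permutex2var(vtable0, vx, vtable1) */
  const uint8_t hi = table[0x80 | (index & 0x7F)]; /* permutex2var(vtable2, vx, vtable3) */
  return (index & 0x80) ? hi : lo;                 /* mask_mov on the sign bit */
}

int main(void) {
  uint8_t table[256];
  for (int i = 0; i < 256; i++) table[i] = (uint8_t) (i ^ 0x5A);
  for (int i = 0; i < 256; i++) {
    if (lut256_via_two_halves((uint8_t) i, table) != table[i]) {
      printf("mismatch at %d\n", i);
      return 1;
    }
  }
  puts("scalar model matches a direct 256-entry lookup");
  return 0;
}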
XNNPACK
|
XNNPACK-master/src/amalgam/gen/fp16arith.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <string.h>
#include <arm_fp16.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f16_vdiv_minmax_ukernel__fp16arith_x2(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
memcpy(&vy_min, ¶ms->fp16arith.min, sizeof(vy_min));
memcpy(&vy_max, ¶ms->fp16arith.max, sizeof(vy_max));
for (; batch >= 2 * sizeof(float16_t); batch -= 2 * sizeof(float16_t)) {
const float16_t va0 = *a++;
const float16_t va1 = *a++;
const float16_t vb0 = *b++;
const float16_t vb1 = *b++;
float16_t vacc0 = vdivh_f16(va0, vb0);
float16_t vacc1 = vdivh_f16(va1, vb1);
vacc0 = vmaxnmh_f16(vacc0, vy_min);
vacc1 = vmaxnmh_f16(vacc1, vy_min);
vacc0 = vminnmh_f16(vacc0, vy_max);
vacc1 = vminnmh_f16(vacc1, vy_max);
*o++ = vacc0;
*o++ = vacc1;
}
if XNN_UNLIKELY(batch != 0) {
const float16_t va = *a;
const float16_t vb = *b;
float16_t vacc = vdivh_f16(va, vb);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o = vacc;
}
}
void xnn_f16_vdivc_minmax_ukernel__fp16arith_x2(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
memcpy(&vy_min, ¶ms->fp16arith.min, sizeof(vy_min));
memcpy(&vy_max, ¶ms->fp16arith.max, sizeof(vy_max));
const float16_t vb = *b;
for (; batch >= 2 * sizeof(float16_t); batch -= 2 * sizeof(float16_t)) {
float16_t vacc0 = a[0];
float16_t vacc1 = a[1];
a += 2;
vacc0 = vdivh_f16(vacc0, vb);
vacc1 = vdivh_f16(vacc1, vb);
vacc0 = vmaxnmh_f16(vacc0, vy_min);
vacc1 = vmaxnmh_f16(vacc1, vy_min);
vacc0 = vminnmh_f16(vacc0, vy_max);
vacc1 = vminnmh_f16(vacc1, vy_max);
o[0] = vacc0;
o[1] = vacc1;
o += 2;
}
if XNN_UNLIKELY(batch != 0) {
float16_t vacc = *a;
vacc = vdivh_f16(vacc, vb);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o = vacc;
}
}
void xnn_f16_vrdivc_minmax_ukernel__fp16arith_x2(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float16_t* a = (const float16_t*) input_a;
const float16_t* b = (const float16_t*) input_b;
float16_t* o = (float16_t*) output;
float16_t vy_min, vy_max;
memcpy(&vy_min, ¶ms->fp16arith.min, sizeof(vy_min));
memcpy(&vy_max, ¶ms->fp16arith.max, sizeof(vy_max));
const float16_t vb = *b;
for (; batch >= 2 * sizeof(float16_t); batch -= 2 * sizeof(float16_t)) {
float16_t vacc0 = a[0];
float16_t vacc1 = a[1];
a += 2;
vacc0 = vdivh_f16(vb, vacc0);
vacc1 = vdivh_f16(vb, vacc1);
vacc0 = vmaxnmh_f16(vacc0, vy_min);
vacc1 = vmaxnmh_f16(vacc1, vy_min);
vacc0 = vminnmh_f16(vacc0, vy_max);
vacc1 = vminnmh_f16(vacc1, vy_max);
o[0] = vacc0;
o[1] = vacc1;
o += 2;
}
if XNN_UNLIKELY(batch != 0) {
float16_t vacc = *a;
vacc = vdivh_f16(vb, vacc);
vacc = vmaxnmh_f16(vacc, vy_min);
vacc = vminnmh_f16(vacc, vy_max);
*o = vacc;
}
}
| 4,380 | 25.233533 | 75 |
c
|
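All three fp16arith kernels above share the same elementwise semantics: a division followed by clamping to [min, max], where the "divc" variant broadcasts the second operand and "rdivc" swaps the operand order. The sketch below restates those semantics in single precision (plain float rather than float16_t, to keep it portable); the function names are illustrative. Note that fminf/fmaxf, like vminnmh/vmaxnmh, prefer the non-NaN operand.

#include <math.h>
#include <stddef.h>
#include <stdio.h>

/* Reference: out[i] = clamp(a[i] / b[i], y_min, y_max). */
static void vdiv_minmax_ref(size_t n, const float* a, const float* b,
                            float* out, float y_min, float y_max)
{
  for (size_t i = 0; i < n; i++) {
    out[i] = fminf(fmaxf(a[i] / b[i], y_min), y_max);
  }
}

/* The "rdivc" flavour divides a broadcast value by each element. */
static void vrdivc_minmax_ref(size_t n, const float* a, float b,
                              float* out, float y_min, float y_max)
{
  for (size_t i = 0; i < n; i++) {
    out[i] = fminf(fmaxf(b / a[i], y_min), y_max);
  }
}

int main(void) {
  const float a[4] = {1.0f, 2.0f, 4.0f, 8.0f};
  const float b[4] = {2.0f, 2.0f, 2.0f, 2.0f};
  float out[4];
  vdiv_minmax_ref(4, a, b, out, 0.25f, 2.0f);
  for (int i = 0; i < 4; i++) printf("%g ", out[i]);  /* 0.5 1 2 2 */
  printf("\n");
  vrdivc_minmax_ref(4, a, 2.0f, out, 0.25f, 2.0f);
  for (int i = 0; i < 4; i++) printf("%g ", out[i]);  /* 2 1 0.5 0.25 */
  printf("\n");
  return 0;
}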
XNNPACK
|
XNNPACK-master/src/amalgam/gen/neonfp16.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
#include <xnnpack/vcvt.h>
void xnn_f16_f32_vcvt_ukernel__neonfp16_x16(
size_t batch,
const void* input,
float* output,
const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
const float16x8_t vh0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vh1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float32x4_t vf0 = vcvt_f32_f16(vget_low_f16(vh0));
const float32x4_t vf1 = vcvt_f32_f16(vget_high_f16(vh0));
const float32x4_t vf2 = vcvt_f32_f16(vget_low_f16(vh1));
const float32x4_t vf3 = vcvt_f32_f16(vget_high_f16(vh1));
vst1q_f32(output, vf0); output += 4;
vst1q_f32(output, vf1); output += 4;
vst1q_f32(output, vf2); output += 4;
vst1q_f32(output, vf3); output += 4;
}
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t vh = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float32x4_t vf_lo = vcvt_f32_f16(vget_low_f16(vh));
const float32x4_t vf_hi = vcvt_f32_f16(vget_high_f16(vh));
vst1q_f32(output, vf_lo); output += 4;
vst1q_f32(output, vf_hi); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint16_t));
assert(batch <= 7 * sizeof(uint16_t));
const float16x8_t vh = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float32x4_t vf = vcvt_f32_f16(vget_low_f16(vh));
if (batch & (4 * sizeof(uint16_t))) {
vst1q_f32(output, vf); output += 4;
vf = vcvt_f32_f16(vget_high_f16(vh));
}
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(uint16_t))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
void xnn_f16_f32acc_rsum_ukernel__neonfp16_x32_acc4(
size_t batch,
const void* input,
void* output,
const union xnn_f16_f32acc_scale_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
float32x4_t vacc0 = vmovq_n_f32(0.0f);
float32x4_t vacc1 = vmovq_n_f32(0.0f);
float32x4_t vacc2 = vmovq_n_f32(0.0f);
float32x4_t vacc3 = vmovq_n_f32(0.0f);
for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) {
const float16x8_t vh01 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vh23 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vh45 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vh67 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float32x4_t vt0 = vcvt_f32_f16(vget_low_f16(vh01));
const float32x4_t vt1 = vcvt_f32_f16(vget_high_f16(vh01));
const float32x4_t vt2 = vcvt_f32_f16(vget_low_f16(vh23));
const float32x4_t vt3 = vcvt_f32_f16(vget_high_f16(vh23));
const float32x4_t vt4 = vcvt_f32_f16(vget_low_f16(vh45));
const float32x4_t vt5 = vcvt_f32_f16(vget_high_f16(vh45));
const float32x4_t vt6 = vcvt_f32_f16(vget_low_f16(vh67));
const float32x4_t vt7 = vcvt_f32_f16(vget_high_f16(vh67));
vacc0 = vaddq_f32(vacc0, vt0);
vacc1 = vaddq_f32(vacc1, vt1);
vacc2 = vaddq_f32(vacc2, vt2);
vacc3 = vaddq_f32(vacc3, vt3);
vacc0 = vaddq_f32(vacc0, vt4);
vacc1 = vaddq_f32(vacc1, vt5);
vacc2 = vaddq_f32(vacc2, vt6);
vacc3 = vaddq_f32(vacc3, vt7);
}
vacc0 = vaddq_f32(vacc0, vacc1);
vacc2 = vaddq_f32(vacc2, vacc3);
vacc0 = vaddq_f32(vacc0, vacc2);
for (; batch >= 4 * sizeof(uint16_t); batch -= 4 * sizeof(uint16_t)) {
const float16x4_t vh = vreinterpret_f16_u16(vld1_u16(i)); i += 4;
const float32x4_t vt = vcvt_f32_f16(vh);
vacc0 = vaddq_f32(vacc0, vt);
}
const float32x2_t vscale = vld1_dup_f32(¶ms->scalar.scale);
float32x2_t vacc = vadd_f32(vget_low_f32(vacc0), vget_high_f32(vacc0));
if XNN_UNLIKELY(batch & (2 * sizeof(uint16_t))) {
const float16x4_t vh = vreinterpret_f16_u32(vld1_dup_u32((const void*) i)); i += 2;
const float32x4_t vt = vcvt_f32_f16(vh);
vacc = vadd_f32(vacc, vget_low_f32(vt));
}
vacc = vpadd_f32(vacc, vacc);
if XNN_UNLIKELY(batch & (1 * sizeof(uint16_t))) {
const float16x4_t vh = vreinterpret_f16_u16(vld1_dup_u16(i));
const float32x4_t vt = vcvt_f32_f16(vh);
vacc = vadd_f32(vacc, vget_low_f32(vt));
}
vacc = vmul_f32(vacc, vscale);
const float16x4_t vout = vcvt_f16_f32(vcombine_f32(vacc, vacc));
vst1_lane_u16(o, vreinterpret_u16_f16(vout), 0);
}
void xnn_f32_f16_vcvt_ukernel__neonfp16_x16(
size_t batch,
const float* input,
void* output,
const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
uint16_t* o = (uint16_t*) output;
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vf0 = vld1q_f32(input); input += 4;
const float32x4_t vf1 = vld1q_f32(input); input += 4;
const float32x4_t vf2 = vld1q_f32(input); input += 4;
const float32x4_t vf3 = vld1q_f32(input); input += 4;
const uint16x8_t vh0 = vreinterpretq_u16_f16(vcombine_f16(vcvt_f16_f32(vf0), vcvt_f16_f32(vf1)));
const uint16x8_t vh1 = vreinterpretq_u16_f16(vcombine_f16(vcvt_f16_f32(vf2), vcvt_f16_f32(vf3)));
vst1q_u16(o, vh0); o += 8;
vst1q_u16(o, vh1); o += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vf = vld1q_f32(input); input += 4;
const uint16x4_t vh = vreinterpret_u16_f16(vcvt_f16_f32(vf));
vst1_u16(o, vh); o += 4;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch % sizeof(float) == 0);
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
const float32x4_t vf = vld1q_f32(input);
uint16x4_t vh = vreinterpret_u16_f16(vcvt_f16_f32(vf));
if (batch & (2 * sizeof(float))) {
vst1_lane_u32((void*) o, vreinterpret_u32_u16(vh), 0); o += 2;
vh = vext_u16(vh, vh, 2);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_u16(o, vh, 0);
}
}
}
| 6,740 | 35.241935 | 101 |
c
|
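The rsum kernel above converts half-precision inputs to f32, accumulates in four independent f32 accumulators to shorten the serial dependency chain, combines and scales the total once, and converts the result back to f16. The sketch below models just the accumulation strategy in plain float (portable C has no half type); the four-way split mirrors vacc0..vacc3 in the NEON code.

#include <stddef.h>
#include <stdio.h>

/* Four-accumulator reduction: independent partial sums reduce the dependency
 * between additions; they are combined and scaled once at the end. */
static float rsum_scaled(size_t n, const float* x, float scale)
{
  float acc[4] = {0.0f, 0.0f, 0.0f, 0.0f};
  size_t i = 0;
  for (; i + 4 <= n; i += 4) {
    acc[0] += x[i + 0];
    acc[1] += x[i + 1];
    acc[2] += x[i + 2];
    acc[3] += x[i + 3];
  }
  float sum = (acc[0] + acc[1]) + (acc[2] + acc[3]);
  for (; i < n; i++) {
    sum += x[i];
  }
  return sum * scale;
}

int main(void) {
  float x[10];
  for (int i = 0; i < 10; i++) x[i] = (float) (i + 1);
  printf("%g\n", rsum_scaled(10, x, 0.1f));  /* 55 * 0.1 = 5.5 */
  return 0;
}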
XNNPACK
|
XNNPACK-master/src/amalgam/gen/neonfp16arith-aarch64.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vbinary.h>
#include <xnnpack/vunary.h>
void xnn_f16_vdiv_minmax_ukernel__aarch64_neonfp16arith_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const float16x8_t vy_min = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.min));
const float16x8_t vy_max = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.max));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b)); b += 8;
float16x8_t vy01234567 = vdivq_f16(va01234567, vb01234567);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
const float16x8_t vb01234567 = vreinterpretq_f16_u16(vld1q_u16(b));
float16x8_t vy01234567 = vdivq_f16(va01234567, vb01234567);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
void xnn_f16_vdivc_minmax_ukernel__aarch64_neonfp16arith_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const float16x8_t vy_min = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.min));
const float16x8_t vy_max = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.max));
const float16x8_t vb = vreinterpretq_f16_u16(vld1q_dup_u16(b));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
float16x8_t vy01234567 = vdivq_f16(va01234567, vb);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
float16x8_t vy01234567 = vdivq_f16(va01234567, vb);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
void xnn_f16_vrdivc_minmax_ukernel__aarch64_neonfp16arith_x8(
size_t batch,
const void* restrict input_a,
const void* restrict input_b,
void* restrict output,
const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint16_t* a = (const uint16_t*) input_a;
const uint16_t* b = (const uint16_t*) input_b;
uint16_t* o = (uint16_t*) output;
const float16x8_t vy_min = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.min));
const float16x8_t vy_max = vreinterpretq_f16_u16(vld1q_dup_u16(¶ms->fp16arith.max));
const float16x8_t vb = vreinterpretq_f16_u16(vld1q_dup_u16(b));
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a)); a += 8;
float16x8_t vy01234567 = vdivq_f16(vb, va01234567);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
vst1q_u16(o, vreinterpretq_u16_f16(vy01234567)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t va01234567 = vreinterpretq_f16_u16(vld1q_u16(a));
float16x8_t vy01234567 = vdivq_f16(vb, va01234567);
vy01234567 = vmaxq_f16(vy01234567, vy_min);
vy01234567 = vminq_f16(vy01234567, vy_max);
float16x4_t vy0123 = vget_low_f16(vy01234567);
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vy0123)); o += 4;
vy0123 = vget_high_f16(vy01234567);
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy0123), 0); o += 2;
vy0123 = vext_f16(vy0123, vy0123, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy0123), 0);
}
}
}
void xnn_f16_vsqrt_ukernel__aarch64_neonfp16arith_sqrt_x8(
size_t batch,
const void* input,
void* output,
const union xnn_f16_sqrt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
vacc = vsqrtq_f16(vacc);
vst1q_u16(o, vreinterpretq_u16_f16(vacc)); o += 8;
}
if XNN_UNLIKELY(batch != 0) {
const float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i));
float16x4_t vacc_lo = vsqrt_f16(vget_low_f16(vacc));
if (batch & (4 * sizeof(uint16_t))) {
vst1_u16(o, vreinterpret_u16_f16(vacc_lo)); o += 4;
vacc_lo = vsqrt_f16(vget_high_f16(vacc));
}
if (batch & (2 * sizeof(uint16_t))) {
vst1_lane_u32((void*) o, vreinterpret_u32_f16(vacc_lo), 0); o += 2;
vacc_lo = vext_f16(vacc_lo, vacc_lo, 2);
}
if (batch & (1 * sizeof(uint16_t))) {
vst1_lane_u16(o, vreinterpret_u16_f16(vacc_lo), 0);
}
}
}
void xnn_f16_vtanh_ukernel__aarch64_neonfp16arith_expm1minus_rr1_p3h2ts_div_x32(
size_t n,
const void* input,
void* output,
const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(n != 0);
assert(n % sizeof(uint16_t) == 0);
assert(input != NULL);
assert(output != NULL);
const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482)));
const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F)));
const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5)));
const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C)));
const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B)));
const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008)));
const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000)));
const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00)));
const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000));
const uint16_t* i = (const uint16_t*) input;
uint16_t* o = (uint16_t*) output;
for (; n >= 4 * sizeof(float16x8_t); n -= 4 * sizeof(float16x8_t)) {
const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz0 = vabsq_f16(vx0);
float16x8_t vz1 = vabsq_f16(vx1);
float16x8_t vz2 = vabsq_f16(vx2);
float16x8_t vz3 = vabsq_f16(vx3);
vz0 = vminq_f16(vz0, vsat_cutoff);
vz1 = vminq_f16(vz1, vsat_cutoff);
vz2 = vminq_f16(vz2, vsat_cutoff);
vz3 = vminq_f16(vz3, vsat_cutoff);
float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
vn0 = vsubq_f16(vn0, vmagic_bias);
const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
vn1 = vsubq_f16(vn1, vmagic_bias);
const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
vn2 = vsubq_f16(vn2, vmagic_bias);
const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
vn3 = vsubq_f16(vn3, vmagic_bias);
const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2);
const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2);
const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2);
const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2);
float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0);
float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1);
float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2);
float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3);
vp0 = vfmsq_f16(vtwo, vp0, vt0);
vp1 = vfmsq_f16(vtwo, vp1, vt1);
vp2 = vfmsq_f16(vtwo, vp2, vt2);
vp3 = vfmsq_f16(vtwo, vp3, vt3);
const float16x8_t vts0 = vmulq_f16(vt0, vs0);
const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one);
const float16x8_t vts1 = vmulq_f16(vt1, vs1);
const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one);
const float16x8_t vts2 = vmulq_f16(vt2, vs2);
const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one);
const float16x8_t vts3 = vmulq_f16(vt3, vs3);
const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one);
const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0);
const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1);
const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2);
const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3);
const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo);
const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo);
const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo);
const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo);
float16x8_t vy0 = vdivq_f16(vemo0, vepo0);
float16x8_t vy1 = vdivq_f16(vemo1, vepo1);
float16x8_t vy2 = vdivq_f16(vemo2, vepo2);
float16x8_t vy3 = vdivq_f16(vemo3, vepo3);
vy0 = vbslq_f16(vsign_mask, vx0, vy0);
vy1 = vbslq_f16(vsign_mask, vx1, vy1);
vy2 = vbslq_f16(vsign_mask, vx2, vy2);
vy3 = vbslq_f16(vsign_mask, vx3, vy3);
vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8;
vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8;
}
for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz = vabsq_f16(vx);
vz = vminq_f16(vz, vsat_cutoff);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
const float16x8_t vt = vfmaq_f16(vz, vn, vln2);
float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
vp = vfmsq_f16(vtwo, vp, vt);
const float16x8_t vts = vmulq_f16(vt, vs);
const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
const float16x8_t vepo = vaddq_f16(vemo, vtwo);
float16x8_t vy = vdivq_f16(vemo, vepo);
vy = vbslq_f16(vsign_mask, vx, vy);
vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8;
}
if (n != 0) {
const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
float16x8_t vz = vabsq_f16(vx);
vz = vminq_f16(vz, vsat_cutoff);
float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
vn = vsubq_f16(vn, vmagic_bias);
const float16x8_t vt = vfmaq_f16(vz, vn, vln2);
float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
vp = vfmsq_f16(vtwo, vp, vt);
const float16x8_t vts = vmulq_f16(vt, vs);
const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
const float16x8_t vepo = vaddq_f16(vemo, vtwo);
float16x8_t vy = vdivq_f16(vemo, vepo);
vy = vbslq_f16(vsign_mask, vx, vy);
float16x4_t vy_lo = vget_low_f16(vy);
if (n & 4 * sizeof(uint16_t)) {
vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4;
vy_lo = vget_high_f16(vy);
}
if (n & 2 * sizeof(uint16_t)) {
      vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o += 2;
vy_lo = vext_f16(vy_lo, vy_lo, 2);
}
if (n & 1 * sizeof(uint16_t)) {
vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0);
}
}
}
| 14,133 | 37.2 | 95 |
c
|
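The tanh kernel above works on z = min(|x|, sat_cutoff), evaluates em1 ~= expm1(-2z) with a range-reduced half-precision polynomial (the magic-bias rounding and shift build 2^n; the p3 polynomial supplies the rest), and then forms em1 / (em1 + 2), which equals -tanh(|x|); the final bit-select splices the sign bit of x back in, yielding tanh(x). The identity tanh(z) = -expm1(-2z) / (expm1(-2z) + 2) is easy to check in plain C with libm, as in the hedged reference below; the kernel's constants and polynomial are not reproduced here.

#include <math.h>
#include <stdio.h>

/* Reference for the identity used above, in single precision via libm. */
static float tanh_via_expm1(float x)
{
  const float z = fabsf(x);
  const float em1 = expm1f(-2.0f * z);   /* in [-1, 0] for z >= 0 */
  const float t = -em1 / (em1 + 2.0f);   /* tanh(|x|) */
  return copysignf(t, x);                /* restore the sign of x */
}

int main(void) {
  for (int i = -4; i <= 4; i++) {
    const float x = 0.75f * (float) i;
    printf("x=% .2f tanhf=% .6f via expm1=% .6f\n", x, tanhf(x), tanh_via_expm1(x));
  }
  return 0;
}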
XNNPACK
|
XNNPACK-master/src/amalgam/gen/scalar.c
|
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <fxdiv.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <xnnpack/common.h>
#include <xnnpack/lut.h>
#include <xnnpack/math.h>
#include <xnnpack/packw.h>
#include <xnnpack/transpose.h>
#include <xnnpack/vunary.h>
static inline uint32_t compute_sum(
size_t n,
const uint8_t* x,
const uint32_t* t)
{
assert(n != 0);
uint32_t vsum = 0;
do {
const size_t vx = *x++;
vsum += t[vx];
} while (--n != 0);
return vsum;
}
void xnn_u8_lut32norm_ukernel__scalar(
size_t n,
const uint8_t* x,
const uint32_t* t,
uint8_t* y)
{
assert(n != 0);
const uint32_t vsum = compute_sum(n, x, t);
assert(vsum != 0);
struct fxdiv_divisor_uint32_t vsum_divisor = fxdiv_init_uint32_t(vsum);
const uint32_t vrounding = (vsum >> 1);
do {
const size_t vx = *x++;
const uint32_t vt = t[vx];
const uint32_t vq = fxdiv_quotient_uint32_t((vt << 8) + vrounding, vsum_divisor);
const uint8_t vy = vq > 255 ? UINT8_C(255) : (uint8_t) vq;
*y++ = vy;
} while (--n != 0);
}
void xnn_x24_transposec_ukernel__1x2_scalar(
    const void* input,
    void* output,
size_t input_stride,
size_t output_stride,
size_t block_width,
size_t block_height,
const union xnn_x24_transpose_params* params) XNN_OOB_READS
{
assert(output_stride >= block_height * 3);
assert(input_stride >= block_width * 3);
const size_t input_reset = 6 - block_height * input_stride;
const size_t output_reset = 2 * output_stride - block_height * 3;
const size_t input_offset = 1 * input_stride;
const uint8_t* i0 = (const uint8_t*) input;
uint8_t* o0 = (uint8_t*) output;
uint8_t* o1 = (uint8_t*) ((uintptr_t) o0 + output_stride);
do {
if XNN_UNPREDICTABLE(block_width < 2) {
o1 = o0;
}
size_t bh = block_height;
for (; bh >= 1; bh -= 1) {
o1[0] = i0[3];
o1[1] = i0[4];
o1[2] = i0[5];
o1 += 3;
o0[0] = i0[0];
o0[1] = i0[1];
o0[2] = i0[2];
o0 += 3;
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
i0 = (const uint8_t*) ((uintptr_t) i0 + input_reset);
o0 = (uint8_t*) ((uintptr_t) o0 + output_reset);
o1 = (uint8_t*) ((uintptr_t) o1 + output_reset);
block_width = doz(block_width, 2);
} while (block_width != 0);
}
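// Packs GOI-layout (groups, output channels, input channels) weights for GEMM microkernels with nr == 2:
// each 2-column tile stores its biases first, then the two columns' weights interleaved along k.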
void xnn_x32_packw_gemm_goi_ukernel_x2__scalar_float_x4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 2);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
float* out = (float*) packed_weights;
const float* b = (const float*) bias;
do {
// NC main loop multiple of 2
const float* w0 = (const float*) weights;
size_t n = nc;
    for (; n >= 2; n -= 2) {
if XNN_LIKELY(b != NULL) {
out[0] = b[0];
out[1] = b[1];
b += 2;
} else {
out[0] = 0;
out[1] = 0;
}
out += 2;
const float* w1 = w0 + kc;
// KC main loop multiple of 2x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const float v00 = w0[0];
const float v01 = w0[1];
const float v02 = w0[2];
const float v03 = w0[3];
w0 += 4;
const float v10 = w1[0];
const float v11 = w1[1];
const float v12 = w1[2];
const float v13 = w1[3];
w1 += 4;
out[0] = v00;
out[1] = v10;
out[2] = v01;
out[3] = v11;
out[4] = v02;
out[5] = v12;
out[6] = v03;
out[7] = v13;
out += 8;
}
// KC remainder
for (; k != 0; --k) {
const float v0 = *w0++;
out[0] = v0;
const float v1 = *w1++;
out[1] = v1;
out += 2;
}
out = (float*) ((uintptr_t) out + extra_bytes);
w0 = w1;
}
// NC remainder (1..1)
if XNN_UNLIKELY(n != 0) {
if XNN_LIKELY(b != NULL) {
size_t nb = n;
do {
*out++ = *b++;
} while (--nb != 0);
} else {
size_t nb = n;
do {
*out++ = 0;
} while (--nb != 0);
}
out += (2 - n);
// KC main loop multiple of 2x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const float v00 = w0[0];
const float v01 = w0[1];
const float v02 = w0[2];
const float v03 = w0[3];
w0 += 4;
out[0] = v00;
out[2] = v01;
out[4] = v02;
out[6] = v03;
out += 8;
}
// KC remainder of 1..3
for (; k != 0; --k) {
const float v0 = *w0++;
out[0] = v0;
out += 2;
}
out = (float*) ((uintptr_t) out + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
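// Same packing scheme as the x2 kernel above, widened to a 4-column (nr == 4) register tile.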
void xnn_x32_packw_gemm_goi_ukernel_x4__scalar_float_x4(
size_t g,
size_t nc,
size_t kc,
size_t nr,
size_t kr,
size_t sr,
const uint32_t* weights,
const uint32_t* bias,
uint32_t* packed_weights,
size_t extra_bytes,
const void* params)
{
assert(g != 0);
assert(nc != 0);
assert(kc != 0);
assert(nr == 4);
assert(kr == 1);
assert(sr == 1);
assert(weights != NULL);
assert(packed_weights != NULL);
float* out = (float*) packed_weights;
const float* b = (const float*) bias;
do {
// NC main loop multiple of 4
const float* w0 = (const float*) weights;
size_t n = nc;
    for (; n >= 4; n -= 4) {
if XNN_LIKELY(b != NULL) {
out[0] = b[0];
out[1] = b[1];
out[2] = b[2];
out[3] = b[3];
b += 4;
} else {
out[0] = 0;
out[1] = 0;
out[2] = 0;
out[3] = 0;
}
out += 4;
const float* w1 = w0 + kc;
const float* w2 = w1 + kc;
const float* w3 = w2 + kc;
// KC main loop multiple of 4x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const float v00 = w0[0];
const float v01 = w0[1];
const float v02 = w0[2];
const float v03 = w0[3];
w0 += 4;
const float v10 = w1[0];
const float v11 = w1[1];
const float v12 = w1[2];
const float v13 = w1[3];
w1 += 4;
const float v20 = w2[0];
const float v21 = w2[1];
const float v22 = w2[2];
const float v23 = w2[3];
w2 += 4;
const float v30 = w3[0];
const float v31 = w3[1];
const float v32 = w3[2];
const float v33 = w3[3];
w3 += 4;
out[0] = v00;
out[1] = v10;
out[2] = v20;
out[3] = v30;
out[4] = v01;
out[5] = v11;
out[6] = v21;
out[7] = v31;
out[8] = v02;
out[9] = v12;
out[10] = v22;
out[11] = v32;
out[12] = v03;
out[13] = v13;
out[14] = v23;
out[15] = v33;
out += 16;
}
// KC remainder
for (; k != 0; --k) {
const float v0 = *w0++;
out[0] = v0;
const float v1 = *w1++;
out[1] = v1;
const float v2 = *w2++;
out[2] = v2;
const float v3 = *w3++;
out[3] = v3;
out += 4;
}
out = (float*) ((uintptr_t) out + extra_bytes);
w0 = w3;
}
// NC remainder (1..3)
if XNN_UNLIKELY(n != 0) {
if XNN_LIKELY(b != NULL) {
size_t nb = n;
do {
*out++ = *b++;
} while (--nb != 0);
} else {
size_t nb = n;
do {
*out++ = 0;
} while (--nb != 0);
}
out += (4 - n);
// NR remainder has less than 4 rows so last row is not loaded
const float* w1 = w0 + kc;
if XNN_UNPREDICTABLE(n < 2) {
w1 = w0;
}
const float* w2 = w1 + kc;
if XNN_UNPREDICTABLE(n <= 2) {
w2 = w1;
}
// KC main loop multiple of 4x4
size_t k = kc;
for (; k >= 4; k -= 4) {
const float v00 = w0[0];
const float v01 = w0[1];
const float v02 = w0[2];
const float v03 = w0[3];
w0 += 4;
const float v10 = w1[0];
const float v11 = w1[1];
const float v12 = w1[2];
const float v13 = w1[3];
w1 += 4;
const float v20 = w2[0];
const float v21 = w2[1];
const float v22 = w2[2];
const float v23 = w2[3];
w2 += 4;
out[0] = v00;
out[1] = v10;
out[2] = v20;
out[4] = v01;
out[5] = v11;
out[6] = v21;
out[8] = v02;
out[9] = v12;
out[10] = v22;
out[12] = v03;
out[13] = v13;
out[14] = v23;
out += 16;
}
// KC remainder of 1..3
for (; k != 0; --k) {
const float v0 = *w0++;
out[0] = v0;
const float v1 = *w1++;
out[1] = v1;
const float v2 = *w2++;
out[2] = v2;
out += 4;
}
out = (float*) ((uintptr_t) out + extra_bytes);
}
weights += nc * kc;
} while (--g != 0);
}
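// Element-wise 256-entry byte lookup table, unrolled by 4 to expose independent loads.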
void xnn_x8_lut_ukernel__scalar_x4(
size_t batch,
const uint8_t* input,
uint8_t* output,
const uint8_t table[restrict XNN_MIN_ELEMENTS(256)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= 4 * sizeof(uint8_t); batch -= 4 * sizeof(uint8_t)) {
const size_t vx0 = (size_t) input[0];
const size_t vx1 = (size_t) input[1];
const size_t vx2 = (size_t) input[2];
const size_t vx3 = (size_t) input[3];
input += 4;
const uint32_t vt0 = (uint32_t) table[vx0];
const uint32_t vt1 = (uint32_t) table[vx1];
const uint32_t vt2 = (uint32_t) table[vx2];
const uint32_t vt3 = (uint32_t) table[vx3];
output[0] = (uint8_t) vt0;
output[1] = (uint8_t) vt1;
output[2] = (uint8_t) vt2;
output[3] = (uint8_t) vt3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const size_t vx = (size_t) *input++;
const uint32_t vt = (uint32_t) table[vx];
*output++ = (uint8_t) vt;
batch -= sizeof(uint8_t);
} while (batch != 0);
}
}
void xnn_xx_copy_ukernel__scalar_memcpy(size_t batch, const void* input, void* output, const void* params) {
assert(batch != 0);
assert(input != NULL);
assert(output != NULL);
memcpy(output, input, batch);
}
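// Element-size-agnostic transpose: each element is copied with memcpy, walking the input down a column (row stride) while walking the output across a row (element stride).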
void xnn_xx_transposev_ukernel__1x1_scalar_memcpy(
const void* input,
void* output,
size_t input_row_stride,
size_t output_row_stride,
size_t input_element_stride,
size_t output_element_stride,
size_t element_size,
size_t block_width,
size_t block_height)
{
const size_t input_reset = input_element_stride - block_height * input_row_stride;
const size_t output_reset = output_row_stride - block_height * output_element_stride;
const void* i = (const void*) input;
void* o = (void*) output;
do {
size_t bh = block_height;
for (; bh >= 1; bh -= 1) {
memcpy(o, i, element_size);
i = (const void*) ((uintptr_t) i + input_row_stride);
o = (void*) ((uintptr_t) o + output_element_stride);
}
i = (const void*) ((uintptr_t) i + input_reset);
o = (void*) ((uintptr_t) o + output_reset);
block_width -= 1;
} while (block_width != 0);
}
| 11,553 | 23.070833 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/bf16-gemm/gen/bf16-gemm-1x4c8-minmax-neonbf16-bfdot.c
|
// Auto-generated file. Do not edit!
// Template: src/bf16-gemm/c8-neonbf16.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_bf16_gemm_minmax_ukernel_1x4c8__neonbf16_bfdot(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w_ptr,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(bfloat16_t) == 0);
assert(a != NULL);
assert(w_ptr != NULL);
assert(c != NULL);
const bfloat16_t* a0 = (const bfloat16_t*) a;
bfloat16_t* c0 = (bfloat16_t*) c;
const bfloat16_t* w = (const bfloat16_t*) w_ptr;
do {
float32x4_t vacc0x0 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc0x1 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc0x2 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc0x3 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
size_t k = kc;
for (; k >= 8 * sizeof(bfloat16_t); k -= 8 * sizeof(bfloat16_t)) {
const bfloat16x8_t va0 = vld1q_bf16(a0); a0 += 8;
const bfloat16x8_t vb0 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb1 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb2 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb3 = vld1q_bf16(w); w += 8;
vacc0x0 = vbfdotq_f32(vacc0x0, va0, vb0);
vacc0x1 = vbfdotq_f32(vacc0x1, va0, vb1);
vacc0x2 = vbfdotq_f32(vacc0x2, va0, vb2);
vacc0x3 = vbfdotq_f32(vacc0x3, va0, vb3);
}
if XNN_UNLIKELY(k != 0) {
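      // Remainder of fewer than 8 k-values: the packed B panel is zero-padded, so zero the matching A lanes
      // to keep stale data loaded past the end of the row (possibly Inf/NaN) out of the accumulators.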
const bfloat16x8_t va0 = vld1q_bf16(a0); a0 = (const bfloat16_t*) ((uintptr_t) a0 + k);
const bfloat16x8_t vb0 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb1 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb2 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb3 = vld1q_bf16(w); w += 8;
const uint16x8_t vm0 = vceqq_u16(vreinterpretq_u16_bf16(vb0), vmovq_n_u16(0));
const uint16x8_t vm1 = vceqq_u16(vreinterpretq_u16_bf16(vb1), vmovq_n_u16(0));
const uint16x8_t vm2 = vceqq_u16(vreinterpretq_u16_bf16(vb2), vmovq_n_u16(0));
const uint16x8_t vm3 = vceqq_u16(vreinterpretq_u16_bf16(vb3), vmovq_n_u16(0));
const bfloat16x8_t va0x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm0));
vacc0x0 = vbfdotq_f32(vacc0x0, va0x0, vb0);
const bfloat16x8_t va0x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm1));
vacc0x1 = vbfdotq_f32(vacc0x1, va0x1, vb1);
const bfloat16x8_t va0x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm2));
vacc0x2 = vbfdotq_f32(vacc0x2, va0x2, vb2);
const bfloat16x8_t va0x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm3));
vacc0x3 = vbfdotq_f32(vacc0x3, va0x3, vb3);
}
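    // Horizontally reduce the four per-output-column accumulators (4 partial sums each) to one 4-wide row of results.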
#if XNN_ARCH_ARM64
const float32x4_t vacc0x01 = vpaddq_f32(vacc0x0, vacc0x1);
const float32x4_t vacc0x23 = vpaddq_f32(vacc0x2, vacc0x3);
float32x4_t vacc0x0123 = vpaddq_f32(vacc0x01, vacc0x23);
#else
const float32x2_t vsum0x0 = vadd_f32(vget_low_f32(vacc0x0), vget_high_f32(vacc0x0));
const float32x2_t vsum0x1 = vadd_f32(vget_low_f32(vacc0x1), vget_high_f32(vacc0x1));
const float32x2_t vsum0x2 = vadd_f32(vget_low_f32(vacc0x2), vget_high_f32(vacc0x2));
const float32x2_t vsum0x3 = vadd_f32(vget_low_f32(vacc0x3), vget_high_f32(vacc0x3));
float32x4_t vacc0x0123 = vcombine_f32(vpadd_f32(vsum0x0, vsum0x1), vpadd_f32(vsum0x2, vsum0x3));
#endif
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
bfloat16x4_t vout0x0123 = vcvt_bf16_f32(vacc0x0123);
if XNN_LIKELY(nc >= 4) {
vst1_bf16(c0, vout0x0123);
c0 = (bfloat16_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const bfloat16_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_bf16(vout0x0123), 0); c0 += 2;
vout0x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout0x0123), vreinterpret_u16_bf16(vout0x0123), 2));
}
if (nc & 1) {
vst1_lane_bf16(c0, vout0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,819 | 36.364341 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/bf16-gemm/gen/bf16-gemm-1x4c8-minmax-neonbf16-bfmlal.c
|
// Auto-generated file. Do not edit!
// Template: src/bf16-gemm/c8-neonbf16.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_bf16_gemm_minmax_ukernel_1x4c8__neonbf16_bfmlal(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w_ptr,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(bfloat16_t) == 0);
assert(a != NULL);
assert(w_ptr != NULL);
assert(c != NULL);
const bfloat16_t* a0 = (const bfloat16_t*) a;
bfloat16_t* c0 = (bfloat16_t*) c;
const bfloat16_t* w = (const bfloat16_t*) w_ptr;
do {
float32x4_t vacc0x0 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc0x1 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc0x2 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc0x3 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
size_t k = kc;
for (; k >= 8 * sizeof(bfloat16_t); k -= 8 * sizeof(bfloat16_t)) {
const bfloat16x8_t va0 = vld1q_bf16(a0); a0 += 8;
const bfloat16x8_t vb0 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb1 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb2 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb3 = vld1q_bf16(w); w += 8;
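      // BFMLALB multiplies the even (bottom) bf16 lanes and BFMLALT the odd (top) lanes, both accumulating into float32.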
vacc0x0 = vbfmlalbq_f32(vacc0x0, va0, vb0);
vacc0x1 = vbfmlalbq_f32(vacc0x1, va0, vb1);
vacc0x2 = vbfmlalbq_f32(vacc0x2, va0, vb2);
vacc0x3 = vbfmlalbq_f32(vacc0x3, va0, vb3);
vacc0x0 = vbfmlaltq_f32(vacc0x0, va0, vb0);
vacc0x1 = vbfmlaltq_f32(vacc0x1, va0, vb1);
vacc0x2 = vbfmlaltq_f32(vacc0x2, va0, vb2);
vacc0x3 = vbfmlaltq_f32(vacc0x3, va0, vb3);
}
if XNN_UNLIKELY(k != 0) {
const bfloat16x8_t va0 = vld1q_bf16(a0); a0 = (const bfloat16_t*) ((uintptr_t) a0 + k);
const bfloat16x8_t vb0 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb1 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb2 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb3 = vld1q_bf16(w); w += 8;
const uint16x8_t vm0 = vceqq_u16(vreinterpretq_u16_bf16(vb0), vmovq_n_u16(0));
const uint16x8_t vm1 = vceqq_u16(vreinterpretq_u16_bf16(vb1), vmovq_n_u16(0));
const uint16x8_t vm2 = vceqq_u16(vreinterpretq_u16_bf16(vb2), vmovq_n_u16(0));
const uint16x8_t vm3 = vceqq_u16(vreinterpretq_u16_bf16(vb3), vmovq_n_u16(0));
const bfloat16x8_t va0x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm0));
vacc0x0 = vbfmlalbq_f32(vacc0x0, va0x0, vb0);
vacc0x0 = vbfmlaltq_f32(vacc0x0, va0x0, vb0);
const bfloat16x8_t va0x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm1));
vacc0x1 = vbfmlalbq_f32(vacc0x1, va0x1, vb1);
vacc0x1 = vbfmlaltq_f32(vacc0x1, va0x1, vb1);
const bfloat16x8_t va0x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm2));
vacc0x2 = vbfmlalbq_f32(vacc0x2, va0x2, vb2);
vacc0x2 = vbfmlaltq_f32(vacc0x2, va0x2, vb2);
const bfloat16x8_t va0x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm3));
vacc0x3 = vbfmlalbq_f32(vacc0x3, va0x3, vb3);
vacc0x3 = vbfmlaltq_f32(vacc0x3, va0x3, vb3);
}
#if XNN_ARCH_ARM64
const float32x4_t vacc0x01 = vpaddq_f32(vacc0x0, vacc0x1);
const float32x4_t vacc0x23 = vpaddq_f32(vacc0x2, vacc0x3);
float32x4_t vacc0x0123 = vpaddq_f32(vacc0x01, vacc0x23);
#else
const float32x2_t vsum0x0 = vadd_f32(vget_low_f32(vacc0x0), vget_high_f32(vacc0x0));
const float32x2_t vsum0x1 = vadd_f32(vget_low_f32(vacc0x1), vget_high_f32(vacc0x1));
const float32x2_t vsum0x2 = vadd_f32(vget_low_f32(vacc0x2), vget_high_f32(vacc0x2));
const float32x2_t vsum0x3 = vadd_f32(vget_low_f32(vacc0x3), vget_high_f32(vacc0x3));
float32x4_t vacc0x0123 = vcombine_f32(vpadd_f32(vsum0x0, vsum0x1), vpadd_f32(vsum0x2, vsum0x3));
#endif
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
bfloat16x4_t vout0x0123 = vcvt_bf16_f32(vacc0x0123);
if XNN_LIKELY(nc >= 4) {
vst1_bf16(c0, vout0x0123);
c0 = (bfloat16_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const bfloat16_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_bf16(vout0x0123), 0); c0 += 2;
vout0x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout0x0123), vreinterpret_u16_bf16(vout0x0123), 2));
}
if (nc & 1) {
vst1_lane_bf16(c0, vout0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,245 | 37.014493 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/bf16-gemm/gen/bf16-gemm-1x4c8-minmax-neonfma-shland.c
|
// Auto-generated file. Do not edit!
// Template: src/bf16-gemm/c8-neon-shland.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_bf16_gemm_minmax_ukernel_1x4c8__neonfma_shland(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w_ptr,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w_ptr != NULL);
assert(c != NULL);
const uint16_t* a0 = (const uint16_t*) a;
uint16_t* c0 = (uint16_t*) c;
const uint16_t* w = (const uint16_t*) w_ptr;
const uint16x8_t vmask = vreinterpretq_u16_u32(vmovq_n_u32(UINT32_C(0xFFFF0000)));
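  // bf16 is the upper half of a float32, so even lanes are widened with a 16-bit left shift and odd lanes by masking off the low half.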
do {
float32x4_t vacc0x0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc0x1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc0x2 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc0x3 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
size_t k = kc;
for (; k >= 8 * sizeof(uint16_t); k -= 8 * sizeof(uint16_t)) {
const uint16x8_t va0 = vld1q_u16(a0); a0 += 8;
const uint16x8_t vb0 = vld1q_u16(w); w += 8;
const uint16x8_t vb1 = vld1q_u16(w); w += 8;
const uint16x8_t vb2 = vld1q_u16(w); w += 8;
const uint16x8_t vb3 = vld1q_u16(w); w += 8;
const float32x4_t va0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va0), 16));
const float32x4_t vb0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb0), 16));
const float32x4_t vb1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb1), 16));
const float32x4_t vb2e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb2), 16));
const float32x4_t vb3e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb3), 16));
vacc0x0 = vfmaq_f32(vacc0x0, va0e, vb0e);
vacc0x1 = vfmaq_f32(vacc0x1, va0e, vb1e);
vacc0x2 = vfmaq_f32(vacc0x2, va0e, vb2e);
vacc0x3 = vfmaq_f32(vacc0x3, va0e, vb3e);
const float32x4_t va0o = vreinterpretq_f32_u16(vandq_u16(va0, vmask));
const float32x4_t vb0o = vreinterpretq_f32_u16(vandq_u16(vb0, vmask));
const float32x4_t vb1o = vreinterpretq_f32_u16(vandq_u16(vb1, vmask));
const float32x4_t vb2o = vreinterpretq_f32_u16(vandq_u16(vb2, vmask));
const float32x4_t vb3o = vreinterpretq_f32_u16(vandq_u16(vb3, vmask));
vacc0x0 = vfmaq_f32(vacc0x0, va0o, vb0o);
vacc0x1 = vfmaq_f32(vacc0x1, va0o, vb1o);
vacc0x2 = vfmaq_f32(vacc0x2, va0o, vb2o);
vacc0x3 = vfmaq_f32(vacc0x3, va0o, vb3o);
}
if XNN_UNLIKELY(k != 0) {
const uint16x8_t va0 = vld1q_u16(a0); a0 = (const uint16_t*) ((uintptr_t) a0 + k);
const uint16x8_t vb0 = vld1q_u16(w); w += 8;
const uint16x8_t vb1 = vld1q_u16(w); w += 8;
const uint16x8_t vb2 = vld1q_u16(w); w += 8;
const uint16x8_t vb3 = vld1q_u16(w); w += 8;
const uint16x8_t vm0 = vceqq_u16(vb0, vmovq_n_u16(0));
const uint16x8_t vm1 = vceqq_u16(vb1, vmovq_n_u16(0));
const uint16x8_t vm2 = vceqq_u16(vb2, vmovq_n_u16(0));
const uint16x8_t vm3 = vceqq_u16(vb3, vmovq_n_u16(0));
const float32x4_t vb0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb0), 16));
const float32x4_t vb1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb1), 16));
const float32x4_t vb2e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb2), 16));
const float32x4_t vb3e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb3), 16));
const uint16x8_t va0x0 = vbicq_u16(va0, vm0);
const uint16x8_t va0x1 = vbicq_u16(va0, vm1);
const uint16x8_t va0x2 = vbicq_u16(va0, vm2);
const uint16x8_t va0x3 = vbicq_u16(va0, vm3);
const float32x4_t va0x0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va0x0), 16));
const float32x4_t va0x1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va0x1), 16));
const float32x4_t va0x2e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va0x2), 16));
const float32x4_t va0x3e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va0x3), 16));
vacc0x0 = vfmaq_f32(vacc0x0, va0x0e, vb0e);
vacc0x1 = vfmaq_f32(vacc0x1, va0x1e, vb1e);
vacc0x2 = vfmaq_f32(vacc0x2, va0x2e, vb2e);
vacc0x3 = vfmaq_f32(vacc0x3, va0x3e, vb3e);
const float32x4_t vb0o = vreinterpretq_f32_u16(vandq_u16(vb0, vmask));
const float32x4_t vb1o = vreinterpretq_f32_u16(vandq_u16(vb1, vmask));
const float32x4_t vb2o = vreinterpretq_f32_u16(vandq_u16(vb2, vmask));
const float32x4_t vb3o = vreinterpretq_f32_u16(vandq_u16(vb3, vmask));
const float32x4_t va0x0o = vreinterpretq_f32_u16(vandq_u16(va0x0, vmask));
const float32x4_t va0x1o = vreinterpretq_f32_u16(vandq_u16(va0x1, vmask));
const float32x4_t va0x2o = vreinterpretq_f32_u16(vandq_u16(va0x2, vmask));
const float32x4_t va0x3o = vreinterpretq_f32_u16(vandq_u16(va0x3, vmask));
vacc0x0 = vfmaq_f32(vacc0x0, va0x0o, vb0o);
vacc0x1 = vfmaq_f32(vacc0x1, va0x1o, vb1o);
vacc0x2 = vfmaq_f32(vacc0x2, va0x2o, vb2o);
vacc0x3 = vfmaq_f32(vacc0x3, va0x3o, vb3o);
}
#if XNN_ARCH_ARM64
const float32x4_t vacc0x01 = vpaddq_f32(vacc0x0, vacc0x1);
const float32x4_t vacc0x23 = vpaddq_f32(vacc0x2, vacc0x3);
float32x4_t vacc0x0123 = vpaddq_f32(vacc0x01, vacc0x23);
#else
const float32x2_t vsum0x0 = vadd_f32(vget_low_f32(vacc0x0), vget_high_f32(vacc0x0));
const float32x2_t vsum0x1 = vadd_f32(vget_low_f32(vacc0x1), vget_high_f32(vacc0x1));
const float32x2_t vsum0x2 = vadd_f32(vget_low_f32(vacc0x2), vget_high_f32(vacc0x2));
const float32x2_t vsum0x3 = vadd_f32(vget_low_f32(vacc0x3), vget_high_f32(vacc0x3));
float32x4_t vacc0x0123 = vcombine_f32(vpadd_f32(vsum0x0, vsum0x1), vpadd_f32(vsum0x2, vsum0x3));
#endif
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
uint16x4_t vout0x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc0x0123), 16);
if XNN_LIKELY(nc >= 4) {
vst1_u16(c0, vout0x0123);
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_u16(vout0x0123), 0); c0 += 2;
vout0x0123 = vext_u16(vout0x0123, vout0x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c0, vout0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,235 | 40.348571 | 109 |
c
|
XNNPACK
|
XNNPACK-master/src/bf16-gemm/gen/bf16-gemm-1x4c8-minmax-neonfma-zip.c
|
// Auto-generated file. Do not edit!
// Template: src/bf16-gemm/c8-neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_bf16_gemm_minmax_ukernel_1x4c8__neonfma_zip(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w_ptr,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w_ptr != NULL);
assert(c != NULL);
const uint16_t* a0 = (const uint16_t*) a;
uint16_t* c0 = (uint16_t*) c;
const uint16_t* w = (const uint16_t*) w_ptr;
const uint16x8_t vzero = vmovq_n_u16(0);
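  // Zipping with zero places 0x0000 below each bf16 lane, yielding the equivalent float32 bit pattern.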
do {
float32x4_t vacc0x0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc0x1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc0x2 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc0x3 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
size_t k = kc;
for (; k >= 8 * sizeof(uint16_t); k -= 8 * sizeof(uint16_t)) {
const uint16x8_t va0h = vld1q_u16(a0); a0 += 8;
const uint16x8_t vb0h = vld1q_u16(w); w += 8;
const uint16x8_t vb1h = vld1q_u16(w); w += 8;
const uint16x8_t vb2h = vld1q_u16(w); w += 8;
const uint16x8_t vb3h = vld1q_u16(w); w += 8;
const uint16x8x2_t va0f = vzipq_u16(vzero, va0h);
const uint16x8x2_t vb0f = vzipq_u16(vzero, vb0h);
const uint16x8x2_t vb1f = vzipq_u16(vzero, vb1h);
const uint16x8x2_t vb2f = vzipq_u16(vzero, vb2h);
const uint16x8x2_t vb3f = vzipq_u16(vzero, vb3h);
vacc0x0 = vfmaq_f32(vacc0x0, vreinterpretq_f32_u16(va0f.val[0]), vreinterpretq_f32_u16(vb0f.val[0]));
vacc0x1 = vfmaq_f32(vacc0x1, vreinterpretq_f32_u16(va0f.val[0]), vreinterpretq_f32_u16(vb1f.val[0]));
vacc0x2 = vfmaq_f32(vacc0x2, vreinterpretq_f32_u16(va0f.val[0]), vreinterpretq_f32_u16(vb2f.val[0]));
vacc0x3 = vfmaq_f32(vacc0x3, vreinterpretq_f32_u16(va0f.val[0]), vreinterpretq_f32_u16(vb3f.val[0]));
vacc0x0 = vfmaq_f32(vacc0x0, vreinterpretq_f32_u16(va0f.val[1]), vreinterpretq_f32_u16(vb0f.val[1]));
vacc0x1 = vfmaq_f32(vacc0x1, vreinterpretq_f32_u16(va0f.val[1]), vreinterpretq_f32_u16(vb1f.val[1]));
vacc0x2 = vfmaq_f32(vacc0x2, vreinterpretq_f32_u16(va0f.val[1]), vreinterpretq_f32_u16(vb2f.val[1]));
vacc0x3 = vfmaq_f32(vacc0x3, vreinterpretq_f32_u16(va0f.val[1]), vreinterpretq_f32_u16(vb3f.val[1]));
}
if XNN_UNLIKELY(k != 0) {
const uint16x8_t va0h = vld1q_u16(a0); a0 = (const uint16_t*) ((uintptr_t) a0 + k);
const uint16x8_t vb0h = vld1q_u16(w); w += 8;
const uint16x8_t vb1h = vld1q_u16(w); w += 8;
const uint16x8_t vb2h = vld1q_u16(w); w += 8;
const uint16x8_t vb3h = vld1q_u16(w); w += 8;
const uint16x8_t vm0h = vceqq_u16(vb0h, vmovq_n_u16(0));
const uint16x8_t vm1h = vceqq_u16(vb1h, vmovq_n_u16(0));
const uint16x8_t vm2h = vceqq_u16(vb2h, vmovq_n_u16(0));
const uint16x8_t vm3h = vceqq_u16(vb3h, vmovq_n_u16(0));
const uint16x8x2_t vb0f = vzipq_u16(vzero, vb0h);
const uint16x8x2_t vb1f = vzipq_u16(vzero, vb1h);
const uint16x8x2_t vb2f = vzipq_u16(vzero, vb2h);
const uint16x8x2_t vb3f = vzipq_u16(vzero, vb3h);
const uint16x8_t va0x0h = vbicq_u16(va0h, vm0h);
const uint16x8_t va0x1h = vbicq_u16(va0h, vm1h);
const uint16x8_t va0x2h = vbicq_u16(va0h, vm2h);
const uint16x8_t va0x3h = vbicq_u16(va0h, vm3h);
const uint16x8x2_t va0x0f = vzipq_u16(vzero, va0x0h);
const uint16x8x2_t va0x1f = vzipq_u16(vzero, va0x1h);
const uint16x8x2_t va0x2f = vzipq_u16(vzero, va0x2h);
const uint16x8x2_t va0x3f = vzipq_u16(vzero, va0x3h);
vacc0x0 = vfmaq_f32(vacc0x0, vreinterpretq_f32_u16(va0x0f.val[0]), vreinterpretq_f32_u16(vb0f.val[0]));
vacc0x1 = vfmaq_f32(vacc0x1, vreinterpretq_f32_u16(va0x1f.val[0]), vreinterpretq_f32_u16(vb1f.val[0]));
vacc0x2 = vfmaq_f32(vacc0x2, vreinterpretq_f32_u16(va0x2f.val[0]), vreinterpretq_f32_u16(vb2f.val[0]));
vacc0x3 = vfmaq_f32(vacc0x3, vreinterpretq_f32_u16(va0x3f.val[0]), vreinterpretq_f32_u16(vb3f.val[0]));
vacc0x0 = vfmaq_f32(vacc0x0, vreinterpretq_f32_u16(va0x0f.val[1]), vreinterpretq_f32_u16(vb0f.val[1]));
vacc0x1 = vfmaq_f32(vacc0x1, vreinterpretq_f32_u16(va0x1f.val[1]), vreinterpretq_f32_u16(vb1f.val[1]));
vacc0x2 = vfmaq_f32(vacc0x2, vreinterpretq_f32_u16(va0x2f.val[1]), vreinterpretq_f32_u16(vb2f.val[1]));
vacc0x3 = vfmaq_f32(vacc0x3, vreinterpretq_f32_u16(va0x3f.val[1]), vreinterpretq_f32_u16(vb3f.val[1]));
}
#if XNN_ARCH_ARM64
const float32x4_t vacc0x01 = vpaddq_f32(vacc0x0, vacc0x1);
const float32x4_t vacc0x23 = vpaddq_f32(vacc0x2, vacc0x3);
float32x4_t vacc0x0123 = vpaddq_f32(vacc0x01, vacc0x23);
#else
const float32x2_t vsum0x0 = vadd_f32(vget_low_f32(vacc0x0), vget_high_f32(vacc0x0));
const float32x2_t vsum0x1 = vadd_f32(vget_low_f32(vacc0x1), vget_high_f32(vacc0x1));
const float32x2_t vsum0x2 = vadd_f32(vget_low_f32(vacc0x2), vget_high_f32(vacc0x2));
const float32x2_t vsum0x3 = vadd_f32(vget_low_f32(vacc0x3), vget_high_f32(vacc0x3));
float32x4_t vacc0x0123 = vcombine_f32(vpadd_f32(vsum0x0, vsum0x1), vpadd_f32(vsum0x2, vsum0x3));
#endif
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
uint16x4_t vout0x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc0x0123), 16);
if XNN_LIKELY(nc >= 4) {
vst1_u16(c0, vout0x0123);
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
nc -= 4;
} else {
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_u16(vout0x0123), 0); c0 += 2;
vout0x0123 = vext_u16(vout0x0123, vout0x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c0, vout0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 6,597 | 40.759494 | 109 |
c
|
XNNPACK
|
XNNPACK-master/src/bf16-gemm/gen/bf16-gemm-1x8c2-minmax-neonbf16-bfdot-lane-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/bf16-gemm/c2-neonbf16-bfdot-lane-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_bf16_gemm_minmax_ukernel_1x8c2__neonbf16_bfdot_lane_ld128(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w_ptr,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(bfloat16_t) == 0);
assert(a != NULL);
assert(w_ptr != NULL);
assert(c != NULL);
const bfloat16_t* a0 = (const bfloat16_t*) a;
bfloat16_t* c0 = (bfloat16_t*) c;
const bfloat16_t* w = (const bfloat16_t*) w_ptr;
do {
float32x4_t vacc0x0123 = vcvt_f32_bf16(vld1_bf16(w)); w += 4;
float32x4_t vacc0x4567 = vcvt_f32_bf16(vld1_bf16(w)); w += 4;
size_t k = kc;
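    // B is packed two k-steps at a time (c2 layout); each vbfdotq_laneq step pairs one broadcast 2-element chunk of A with the matching B registers.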
for (; k >= 8 * sizeof(bfloat16_t); k -= 8 * sizeof(bfloat16_t)) {
const bfloat16x8_t va0 = vld1q_bf16(a0); a0 += 8;
const bfloat16x8_t vb0123c01 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb4567c01 = vld1q_bf16(w); w += 8;
vacc0x0123 = vbfdotq_laneq_f32(vacc0x0123, vb0123c01, va0, 0);
vacc0x4567 = vbfdotq_laneq_f32(vacc0x4567, vb4567c01, va0, 0);
const bfloat16x8_t vb0123c23 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb4567c23 = vld1q_bf16(w); w += 8;
vacc0x0123 = vbfdotq_laneq_f32(vacc0x0123, vb0123c23, va0, 1);
vacc0x4567 = vbfdotq_laneq_f32(vacc0x4567, vb4567c23, va0, 1);
const bfloat16x8_t vb0123c45 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb4567c45 = vld1q_bf16(w); w += 8;
vacc0x0123 = vbfdotq_laneq_f32(vacc0x0123, vb0123c45, va0, 2);
vacc0x4567 = vbfdotq_laneq_f32(vacc0x4567, vb4567c45, va0, 2);
const bfloat16x8_t vb0123c67 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb4567c67 = vld1q_bf16(w); w += 8;
vacc0x0123 = vbfdotq_laneq_f32(vacc0x0123, vb0123c67, va0, 3);
vacc0x4567 = vbfdotq_laneq_f32(vacc0x4567, vb4567c67, va0, 3);
}
if XNN_UNLIKELY(k != 0) {
const bfloat16x8_t va0 = vld1q_bf16(a0); a0 = (const bfloat16_t*) ((uintptr_t) a0 + k);
const bfloat16x8_t vb0123c01 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb4567c01 = vld1q_bf16(w); w += 8;
const uint32x4_t va0c01 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va0)), 0);
const uint32x4_t vm0123c01 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb0123c01), vmovq_n_u16(0)));
const uint32x4_t vm4567c01 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb4567c01), vmovq_n_u16(0)));
const uint32x4_t va0x0123c01 = vbicq_u32(va0c01, vm0123c01);
vacc0x0123 = vbfdotq_f32(vacc0x0123, vb0123c01, vreinterpretq_bf16_u32(va0x0123c01));
const uint32x4_t va0x4567c01 = vbicq_u32(va0c01, vm4567c01);
vacc0x4567 = vbfdotq_f32(vacc0x4567, vb4567c01, vreinterpretq_bf16_u32(va0x4567c01));
if (k > 2 * sizeof(bfloat16_t)) {
const bfloat16x8_t vb0123c23 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb4567c23 = vld1q_bf16(w); w += 8;
const uint32x4_t va0c23 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_low_bf16(va0)), 1);
const uint32x4_t vm0123c23 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb0123c23), vmovq_n_u16(0)));
const uint32x4_t vm4567c23 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb4567c23), vmovq_n_u16(0)));
const uint32x4_t va0x0123c23 = vbicq_u32(va0c23, vm0123c23);
vacc0x0123 = vbfdotq_f32(vacc0x0123, vb0123c23, vreinterpretq_bf16_u32(va0x0123c23));
const uint32x4_t va0x4567c23 = vbicq_u32(va0c23, vm4567c23);
vacc0x4567 = vbfdotq_f32(vacc0x4567, vb4567c23, vreinterpretq_bf16_u32(va0x4567c23));
if (k > 4 * sizeof(bfloat16_t)) {
const bfloat16x8_t vb0123c45 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb4567c45 = vld1q_bf16(w); w += 8;
const uint32x4_t va0c45 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va0)), 0);
const uint32x4_t vm0123c45 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb0123c45), vmovq_n_u16(0)));
const uint32x4_t vm4567c45 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb4567c45), vmovq_n_u16(0)));
const uint32x4_t va0x0123c45 = vbicq_u32(va0c45, vm0123c45);
vacc0x0123 = vbfdotq_f32(vacc0x0123, vb0123c45, vreinterpretq_bf16_u32(va0x0123c45));
const uint32x4_t va0x4567c45 = vbicq_u32(va0c45, vm4567c45);
vacc0x4567 = vbfdotq_f32(vacc0x4567, vb4567c45, vreinterpretq_bf16_u32(va0x4567c45));
if (k > 6 * sizeof(bfloat16_t)) {
const bfloat16x8_t vb0123c67 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb4567c67 = vld1q_bf16(w); w += 8;
const uint32x4_t va0c67 = vdupq_lane_u32(vreinterpret_u32_bf16(vget_high_bf16(va0)), 1);
const uint32x4_t vm0123c67 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb0123c67), vmovq_n_u16(0)));
const uint32x4_t vm4567c67 = vreinterpretq_u32_u16(vceqq_u16(vreinterpretq_u16_bf16(vb4567c67), vmovq_n_u16(0)));
const uint32x4_t va0x0123c67 = vbicq_u32(va0c67, vm0123c67);
vacc0x0123 = vbfdotq_f32(vacc0x0123, vb0123c67, vreinterpretq_bf16_u32(va0x0123c67));
const uint32x4_t va0x4567c67 = vbicq_u32(va0c67, vm4567c67);
vacc0x4567 = vbfdotq_f32(vacc0x4567, vb4567c67, vreinterpretq_bf16_u32(va0x4567c67));
}
}
}
}
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
bfloat16x4_t vout0x0123 = vcvt_bf16_f32(vacc0x0123);
bfloat16x4_t vout0x4567 = vcvt_bf16_f32(vacc0x4567);
if XNN_LIKELY(nc >= 8) {
vst1_bf16(c0, vout0x0123);
vst1_bf16(c0 + 4, vout0x4567);
c0 = (bfloat16_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const bfloat16_t*) ((uintptr_t) a0 - kc);
nc -= 8;
} else {
if (nc & 4) {
vst1_bf16(c0, vout0x0123); c0 += 4;
vout0x0123 = vout0x4567;
}
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_bf16(vout0x0123), 0); c0 += 2;
vout0x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout0x0123), vreinterpret_u16_bf16(vout0x0123), 2));
}
if (nc & 1) {
vst1_lane_bf16(c0, vout0x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 6,986 | 39.622093 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/bf16-gemm/gen/bf16-gemm-2x4c8-minmax-neonbf16-bfdot.c
|
// Auto-generated file. Do not edit!
// Template: src/bf16-gemm/c8-neonbf16.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_bf16_gemm_minmax_ukernel_2x4c8__neonbf16_bfdot(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w_ptr,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(bfloat16_t) == 0);
assert(a != NULL);
assert(w_ptr != NULL);
assert(c != NULL);
const bfloat16_t* a0 = (const bfloat16_t*) a;
bfloat16_t* c0 = (bfloat16_t*) c;
const bfloat16_t* a1 = (const bfloat16_t*) ((uintptr_t) a0 + a_stride);
bfloat16_t* c1 = (bfloat16_t*) ((uintptr_t) c0 + cm_stride);
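  // If only one row is live (mr == 1), alias the second row's pointers to the first so the duplicated work stays in bounds.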
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
const bfloat16_t* w = (const bfloat16_t*) w_ptr;
do {
float32x4_t vacc0x0 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc0x1 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc0x2 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc0x3 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc1x0 = vacc0x0;
float32x4_t vacc1x1 = vacc0x1;
float32x4_t vacc1x2 = vacc0x2;
float32x4_t vacc1x3 = vacc0x3;
size_t k = kc;
for (; k >= 8 * sizeof(bfloat16_t); k -= 8 * sizeof(bfloat16_t)) {
const bfloat16x8_t va0 = vld1q_bf16(a0); a0 += 8;
const bfloat16x8_t va1 = vld1q_bf16(a1); a1 += 8;
const bfloat16x8_t vb0 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb1 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb2 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb3 = vld1q_bf16(w); w += 8;
vacc0x0 = vbfdotq_f32(vacc0x0, va0, vb0);
vacc1x0 = vbfdotq_f32(vacc1x0, va1, vb0);
vacc0x1 = vbfdotq_f32(vacc0x1, va0, vb1);
vacc1x1 = vbfdotq_f32(vacc1x1, va1, vb1);
vacc0x2 = vbfdotq_f32(vacc0x2, va0, vb2);
vacc1x2 = vbfdotq_f32(vacc1x2, va1, vb2);
vacc0x3 = vbfdotq_f32(vacc0x3, va0, vb3);
vacc1x3 = vbfdotq_f32(vacc1x3, va1, vb3);
}
if XNN_UNLIKELY(k != 0) {
const bfloat16x8_t va0 = vld1q_bf16(a0); a0 = (const bfloat16_t*) ((uintptr_t) a0 + k);
const bfloat16x8_t va1 = vld1q_bf16(a1); a1 = (const bfloat16_t*) ((uintptr_t) a1 + k);
const bfloat16x8_t vb0 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb1 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb2 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb3 = vld1q_bf16(w); w += 8;
const uint16x8_t vm0 = vceqq_u16(vreinterpretq_u16_bf16(vb0), vmovq_n_u16(0));
const uint16x8_t vm1 = vceqq_u16(vreinterpretq_u16_bf16(vb1), vmovq_n_u16(0));
const uint16x8_t vm2 = vceqq_u16(vreinterpretq_u16_bf16(vb2), vmovq_n_u16(0));
const uint16x8_t vm3 = vceqq_u16(vreinterpretq_u16_bf16(vb3), vmovq_n_u16(0));
const bfloat16x8_t va0x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm0));
vacc0x0 = vbfdotq_f32(vacc0x0, va0x0, vb0);
const bfloat16x8_t va1x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm0));
vacc1x0 = vbfdotq_f32(vacc1x0, va1x0, vb0);
const bfloat16x8_t va0x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm1));
vacc0x1 = vbfdotq_f32(vacc0x1, va0x1, vb1);
const bfloat16x8_t va1x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm1));
vacc1x1 = vbfdotq_f32(vacc1x1, va1x1, vb1);
const bfloat16x8_t va0x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm2));
vacc0x2 = vbfdotq_f32(vacc0x2, va0x2, vb2);
const bfloat16x8_t va1x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm2));
vacc1x2 = vbfdotq_f32(vacc1x2, va1x2, vb2);
const bfloat16x8_t va0x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm3));
vacc0x3 = vbfdotq_f32(vacc0x3, va0x3, vb3);
const bfloat16x8_t va1x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm3));
vacc1x3 = vbfdotq_f32(vacc1x3, va1x3, vb3);
}
#if XNN_ARCH_ARM64
const float32x4_t vacc0x01 = vpaddq_f32(vacc0x0, vacc0x1);
const float32x4_t vacc1x01 = vpaddq_f32(vacc1x0, vacc1x1);
const float32x4_t vacc0x23 = vpaddq_f32(vacc0x2, vacc0x3);
const float32x4_t vacc1x23 = vpaddq_f32(vacc1x2, vacc1x3);
float32x4_t vacc0x0123 = vpaddq_f32(vacc0x01, vacc0x23);
float32x4_t vacc1x0123 = vpaddq_f32(vacc1x01, vacc1x23);
#else
const float32x2_t vsum0x0 = vadd_f32(vget_low_f32(vacc0x0), vget_high_f32(vacc0x0));
const float32x2_t vsum1x0 = vadd_f32(vget_low_f32(vacc1x0), vget_high_f32(vacc1x0));
const float32x2_t vsum0x1 = vadd_f32(vget_low_f32(vacc0x1), vget_high_f32(vacc0x1));
const float32x2_t vsum1x1 = vadd_f32(vget_low_f32(vacc1x1), vget_high_f32(vacc1x1));
const float32x2_t vsum0x2 = vadd_f32(vget_low_f32(vacc0x2), vget_high_f32(vacc0x2));
const float32x2_t vsum1x2 = vadd_f32(vget_low_f32(vacc1x2), vget_high_f32(vacc1x2));
const float32x2_t vsum0x3 = vadd_f32(vget_low_f32(vacc0x3), vget_high_f32(vacc0x3));
const float32x2_t vsum1x3 = vadd_f32(vget_low_f32(vacc1x3), vget_high_f32(vacc1x3));
float32x4_t vacc0x0123 = vcombine_f32(vpadd_f32(vsum0x0, vsum0x1), vpadd_f32(vsum0x2, vsum0x3));
float32x4_t vacc1x0123 = vcombine_f32(vpadd_f32(vsum1x0, vsum1x1), vpadd_f32(vsum1x2, vsum1x3));
#endif
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
bfloat16x4_t vout0x0123 = vcvt_bf16_f32(vacc0x0123);
bfloat16x4_t vout1x0123 = vcvt_bf16_f32(vacc1x0123);
if XNN_LIKELY(nc >= 4) {
vst1_bf16(c0, vout0x0123);
c0 = (bfloat16_t*) ((uintptr_t) c0 + cn_stride);
vst1_bf16(c1, vout1x0123);
c1 = (bfloat16_t*) ((uintptr_t) c1 + cn_stride);
a0 = (const bfloat16_t*) ((uintptr_t) a0 - kc);
a1 = (const bfloat16_t*) ((uintptr_t) a1 - kc);
nc -= 4;
} else {
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_bf16(vout0x0123), 0); c0 += 2;
vst1_lane_u32((void*) c1, vreinterpret_u32_bf16(vout1x0123), 0); c1 += 2;
vout0x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout0x0123), vreinterpret_u16_bf16(vout0x0123), 2));
vout1x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout1x0123), vreinterpret_u16_bf16(vout1x0123), 2));
}
if (nc & 1) {
vst1_lane_bf16(c0, vout0x0123, 0);
vst1_lane_bf16(c1, vout1x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,297 | 41.929412 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/bf16-gemm/gen/bf16-gemm-2x4c8-minmax-neonbf16-bfmlal.c
|
// Auto-generated file. Do not edit!
// Template: src/bf16-gemm/c8-neonbf16.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_bf16_gemm_minmax_ukernel_2x4c8__neonbf16_bfmlal(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w_ptr,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(bfloat16_t) == 0);
assert(a != NULL);
assert(w_ptr != NULL);
assert(c != NULL);
const bfloat16_t* a0 = (const bfloat16_t*) a;
bfloat16_t* c0 = (bfloat16_t*) c;
const bfloat16_t* a1 = (const bfloat16_t*) ((uintptr_t) a0 + a_stride);
bfloat16_t* c1 = (bfloat16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
const bfloat16_t* w = (const bfloat16_t*) w_ptr;
do {
float32x4_t vacc0x0 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc0x1 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc0x2 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc0x3 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc1x0 = vacc0x0;
float32x4_t vacc1x1 = vacc0x1;
float32x4_t vacc1x2 = vacc0x2;
float32x4_t vacc1x3 = vacc0x3;
size_t k = kc;
for (; k >= 8 * sizeof(bfloat16_t); k -= 8 * sizeof(bfloat16_t)) {
const bfloat16x8_t va0 = vld1q_bf16(a0); a0 += 8;
const bfloat16x8_t va1 = vld1q_bf16(a1); a1 += 8;
const bfloat16x8_t vb0 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb1 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb2 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb3 = vld1q_bf16(w); w += 8;
vacc0x0 = vbfmlalbq_f32(vacc0x0, va0, vb0);
vacc1x0 = vbfmlalbq_f32(vacc1x0, va1, vb0);
vacc0x1 = vbfmlalbq_f32(vacc0x1, va0, vb1);
vacc1x1 = vbfmlalbq_f32(vacc1x1, va1, vb1);
vacc0x2 = vbfmlalbq_f32(vacc0x2, va0, vb2);
vacc1x2 = vbfmlalbq_f32(vacc1x2, va1, vb2);
vacc0x3 = vbfmlalbq_f32(vacc0x3, va0, vb3);
vacc1x3 = vbfmlalbq_f32(vacc1x3, va1, vb3);
vacc0x0 = vbfmlaltq_f32(vacc0x0, va0, vb0);
vacc1x0 = vbfmlaltq_f32(vacc1x0, va1, vb0);
vacc0x1 = vbfmlaltq_f32(vacc0x1, va0, vb1);
vacc1x1 = vbfmlaltq_f32(vacc1x1, va1, vb1);
vacc0x2 = vbfmlaltq_f32(vacc0x2, va0, vb2);
vacc1x2 = vbfmlaltq_f32(vacc1x2, va1, vb2);
vacc0x3 = vbfmlaltq_f32(vacc0x3, va0, vb3);
vacc1x3 = vbfmlaltq_f32(vacc1x3, va1, vb3);
}
if XNN_UNLIKELY(k != 0) {
const bfloat16x8_t va0 = vld1q_bf16(a0); a0 = (const bfloat16_t*) ((uintptr_t) a0 + k);
const bfloat16x8_t va1 = vld1q_bf16(a1); a1 = (const bfloat16_t*) ((uintptr_t) a1 + k);
const bfloat16x8_t vb0 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb1 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb2 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb3 = vld1q_bf16(w); w += 8;
const uint16x8_t vm0 = vceqq_u16(vreinterpretq_u16_bf16(vb0), vmovq_n_u16(0));
const uint16x8_t vm1 = vceqq_u16(vreinterpretq_u16_bf16(vb1), vmovq_n_u16(0));
const uint16x8_t vm2 = vceqq_u16(vreinterpretq_u16_bf16(vb2), vmovq_n_u16(0));
const uint16x8_t vm3 = vceqq_u16(vreinterpretq_u16_bf16(vb3), vmovq_n_u16(0));
const bfloat16x8_t va0x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm0));
vacc0x0 = vbfmlalbq_f32(vacc0x0, va0x0, vb0);
vacc0x0 = vbfmlaltq_f32(vacc0x0, va0x0, vb0);
const bfloat16x8_t va1x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm0));
vacc1x0 = vbfmlalbq_f32(vacc1x0, va1x0, vb0);
vacc1x0 = vbfmlaltq_f32(vacc1x0, va1x0, vb0);
const bfloat16x8_t va0x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm1));
vacc0x1 = vbfmlalbq_f32(vacc0x1, va0x1, vb1);
vacc0x1 = vbfmlaltq_f32(vacc0x1, va0x1, vb1);
const bfloat16x8_t va1x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm1));
vacc1x1 = vbfmlalbq_f32(vacc1x1, va1x1, vb1);
vacc1x1 = vbfmlaltq_f32(vacc1x1, va1x1, vb1);
const bfloat16x8_t va0x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm2));
vacc0x2 = vbfmlalbq_f32(vacc0x2, va0x2, vb2);
vacc0x2 = vbfmlaltq_f32(vacc0x2, va0x2, vb2);
const bfloat16x8_t va1x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm2));
vacc1x2 = vbfmlalbq_f32(vacc1x2, va1x2, vb2);
vacc1x2 = vbfmlaltq_f32(vacc1x2, va1x2, vb2);
const bfloat16x8_t va0x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm3));
vacc0x3 = vbfmlalbq_f32(vacc0x3, va0x3, vb3);
vacc0x3 = vbfmlaltq_f32(vacc0x3, va0x3, vb3);
const bfloat16x8_t va1x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm3));
vacc1x3 = vbfmlalbq_f32(vacc1x3, va1x3, vb3);
vacc1x3 = vbfmlaltq_f32(vacc1x3, va1x3, vb3);
}
#if XNN_ARCH_ARM64
const float32x4_t vacc0x01 = vpaddq_f32(vacc0x0, vacc0x1);
const float32x4_t vacc1x01 = vpaddq_f32(vacc1x0, vacc1x1);
const float32x4_t vacc0x23 = vpaddq_f32(vacc0x2, vacc0x3);
const float32x4_t vacc1x23 = vpaddq_f32(vacc1x2, vacc1x3);
float32x4_t vacc0x0123 = vpaddq_f32(vacc0x01, vacc0x23);
float32x4_t vacc1x0123 = vpaddq_f32(vacc1x01, vacc1x23);
#else
const float32x2_t vsum0x0 = vadd_f32(vget_low_f32(vacc0x0), vget_high_f32(vacc0x0));
const float32x2_t vsum1x0 = vadd_f32(vget_low_f32(vacc1x0), vget_high_f32(vacc1x0));
const float32x2_t vsum0x1 = vadd_f32(vget_low_f32(vacc0x1), vget_high_f32(vacc0x1));
const float32x2_t vsum1x1 = vadd_f32(vget_low_f32(vacc1x1), vget_high_f32(vacc1x1));
const float32x2_t vsum0x2 = vadd_f32(vget_low_f32(vacc0x2), vget_high_f32(vacc0x2));
const float32x2_t vsum1x2 = vadd_f32(vget_low_f32(vacc1x2), vget_high_f32(vacc1x2));
const float32x2_t vsum0x3 = vadd_f32(vget_low_f32(vacc0x3), vget_high_f32(vacc0x3));
const float32x2_t vsum1x3 = vadd_f32(vget_low_f32(vacc1x3), vget_high_f32(vacc1x3));
float32x4_t vacc0x0123 = vcombine_f32(vpadd_f32(vsum0x0, vsum0x1), vpadd_f32(vsum0x2, vsum0x3));
float32x4_t vacc1x0123 = vcombine_f32(vpadd_f32(vsum1x0, vsum1x1), vpadd_f32(vsum1x2, vsum1x3));
#endif
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
bfloat16x4_t vout0x0123 = vcvt_bf16_f32(vacc0x0123);
bfloat16x4_t vout1x0123 = vcvt_bf16_f32(vacc1x0123);
if XNN_LIKELY(nc >= 4) {
vst1_bf16(c0, vout0x0123);
c0 = (bfloat16_t*) ((uintptr_t) c0 + cn_stride);
vst1_bf16(c1, vout1x0123);
c1 = (bfloat16_t*) ((uintptr_t) c1 + cn_stride);
a0 = (const bfloat16_t*) ((uintptr_t) a0 - kc);
a1 = (const bfloat16_t*) ((uintptr_t) a1 - kc);
nc -= 4;
} else {
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_bf16(vout0x0123), 0); c0 += 2;
vst1_lane_u32((void*) c1, vreinterpret_u32_bf16(vout1x0123), 0); c1 += 2;
vout0x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout0x0123), vreinterpret_u16_bf16(vout0x0123), 2));
vout1x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout1x0123), vreinterpret_u16_bf16(vout1x0123), 2));
}
if (nc & 1) {
vst1_lane_bf16(c0, vout0x0123, 0);
vst1_lane_bf16(c1, vout1x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,147 | 42.572193 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/bf16-gemm/gen/bf16-gemm-2x4c8-minmax-neonfma-shland.c
|
// Auto-generated file. Do not edit!
// Template: src/bf16-gemm/c8-neon-shland.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_bf16_gemm_minmax_ukernel_2x4c8__neonfma_shland(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w_ptr,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w_ptr != NULL);
assert(c != NULL);
const uint16_t* a0 = (const uint16_t*) a;
uint16_t* c0 = (uint16_t*) c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* w = (const uint16_t*) w_ptr;
const uint16x8_t vmask = vreinterpretq_u16_u32(vmovq_n_u32(UINT32_C(0xFFFF0000)));
do {
float32x4_t vacc0x0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc0x1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc0x2 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc0x3 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc1x0 = vacc0x0;
float32x4_t vacc1x1 = vacc0x1;
float32x4_t vacc1x2 = vacc0x2;
float32x4_t vacc1x3 = vacc0x3;
size_t k = kc;
for (; k >= 8 * sizeof(uint16_t); k -= 8 * sizeof(uint16_t)) {
const uint16x8_t va0 = vld1q_u16(a0); a0 += 8;
const uint16x8_t va1 = vld1q_u16(a1); a1 += 8;
const uint16x8_t vb0 = vld1q_u16(w); w += 8;
const uint16x8_t vb1 = vld1q_u16(w); w += 8;
const uint16x8_t vb2 = vld1q_u16(w); w += 8;
const uint16x8_t vb3 = vld1q_u16(w); w += 8;
const float32x4_t va0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va0), 16));
const float32x4_t va1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va1), 16));
const float32x4_t vb0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb0), 16));
const float32x4_t vb1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb1), 16));
const float32x4_t vb2e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb2), 16));
const float32x4_t vb3e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb3), 16));
vacc0x0 = vfmaq_f32(vacc0x0, va0e, vb0e);
vacc1x0 = vfmaq_f32(vacc1x0, va1e, vb0e);
vacc0x1 = vfmaq_f32(vacc0x1, va0e, vb1e);
vacc1x1 = vfmaq_f32(vacc1x1, va1e, vb1e);
vacc0x2 = vfmaq_f32(vacc0x2, va0e, vb2e);
vacc1x2 = vfmaq_f32(vacc1x2, va1e, vb2e);
vacc0x3 = vfmaq_f32(vacc0x3, va0e, vb3e);
vacc1x3 = vfmaq_f32(vacc1x3, va1e, vb3e);
const float32x4_t va0o = vreinterpretq_f32_u16(vandq_u16(va0, vmask));
const float32x4_t va1o = vreinterpretq_f32_u16(vandq_u16(va1, vmask));
const float32x4_t vb0o = vreinterpretq_f32_u16(vandq_u16(vb0, vmask));
const float32x4_t vb1o = vreinterpretq_f32_u16(vandq_u16(vb1, vmask));
const float32x4_t vb2o = vreinterpretq_f32_u16(vandq_u16(vb2, vmask));
const float32x4_t vb3o = vreinterpretq_f32_u16(vandq_u16(vb3, vmask));
vacc0x0 = vfmaq_f32(vacc0x0, va0o, vb0o);
vacc1x0 = vfmaq_f32(vacc1x0, va1o, vb0o);
vacc0x1 = vfmaq_f32(vacc0x1, va0o, vb1o);
vacc1x1 = vfmaq_f32(vacc1x1, va1o, vb1o);
vacc0x2 = vfmaq_f32(vacc0x2, va0o, vb2o);
vacc1x2 = vfmaq_f32(vacc1x2, va1o, vb2o);
vacc0x3 = vfmaq_f32(vacc0x3, va0o, vb3o);
vacc1x3 = vfmaq_f32(vacc1x3, va1o, vb3o);
}
if XNN_UNLIKELY(k != 0) {
const uint16x8_t va0 = vld1q_u16(a0); a0 = (const uint16_t*) ((uintptr_t) a0 + k);
const uint16x8_t va1 = vld1q_u16(a1); a1 = (const uint16_t*) ((uintptr_t) a1 + k);
const uint16x8_t vb0 = vld1q_u16(w); w += 8;
const uint16x8_t vb1 = vld1q_u16(w); w += 8;
const uint16x8_t vb2 = vld1q_u16(w); w += 8;
const uint16x8_t vb3 = vld1q_u16(w); w += 8;
const uint16x8_t vm0 = vceqq_u16(vb0, vmovq_n_u16(0));
const uint16x8_t vm1 = vceqq_u16(vb1, vmovq_n_u16(0));
const uint16x8_t vm2 = vceqq_u16(vb2, vmovq_n_u16(0));
const uint16x8_t vm3 = vceqq_u16(vb3, vmovq_n_u16(0));
const float32x4_t vb0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb0), 16));
const float32x4_t vb1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb1), 16));
const float32x4_t vb2e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb2), 16));
const float32x4_t vb3e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb3), 16));
const uint16x8_t va0x0 = vbicq_u16(va0, vm0);
const uint16x8_t va1x0 = vbicq_u16(va1, vm0);
const uint16x8_t va0x1 = vbicq_u16(va0, vm1);
const uint16x8_t va1x1 = vbicq_u16(va1, vm1);
const uint16x8_t va0x2 = vbicq_u16(va0, vm2);
const uint16x8_t va1x2 = vbicq_u16(va1, vm2);
const uint16x8_t va0x3 = vbicq_u16(va0, vm3);
const uint16x8_t va1x3 = vbicq_u16(va1, vm3);
const float32x4_t va0x0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va0x0), 16));
const float32x4_t va1x0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va1x0), 16));
const float32x4_t va0x1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va0x1), 16));
const float32x4_t va1x1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va1x1), 16));
const float32x4_t va0x2e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va0x2), 16));
const float32x4_t va1x2e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va1x2), 16));
const float32x4_t va0x3e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va0x3), 16));
const float32x4_t va1x3e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va1x3), 16));
vacc0x0 = vfmaq_f32(vacc0x0, va0x0e, vb0e);
vacc1x0 = vfmaq_f32(vacc1x0, va1x0e, vb0e);
vacc0x1 = vfmaq_f32(vacc0x1, va0x1e, vb1e);
vacc1x1 = vfmaq_f32(vacc1x1, va1x1e, vb1e);
vacc0x2 = vfmaq_f32(vacc0x2, va0x2e, vb2e);
vacc1x2 = vfmaq_f32(vacc1x2, va1x2e, vb2e);
vacc0x3 = vfmaq_f32(vacc0x3, va0x3e, vb3e);
vacc1x3 = vfmaq_f32(vacc1x3, va1x3e, vb3e);
const float32x4_t vb0o = vreinterpretq_f32_u16(vandq_u16(vb0, vmask));
const float32x4_t vb1o = vreinterpretq_f32_u16(vandq_u16(vb1, vmask));
const float32x4_t vb2o = vreinterpretq_f32_u16(vandq_u16(vb2, vmask));
const float32x4_t vb3o = vreinterpretq_f32_u16(vandq_u16(vb3, vmask));
const float32x4_t va0x0o = vreinterpretq_f32_u16(vandq_u16(va0x0, vmask));
const float32x4_t va1x0o = vreinterpretq_f32_u16(vandq_u16(va1x0, vmask));
const float32x4_t va0x1o = vreinterpretq_f32_u16(vandq_u16(va0x1, vmask));
const float32x4_t va1x1o = vreinterpretq_f32_u16(vandq_u16(va1x1, vmask));
const float32x4_t va0x2o = vreinterpretq_f32_u16(vandq_u16(va0x2, vmask));
const float32x4_t va1x2o = vreinterpretq_f32_u16(vandq_u16(va1x2, vmask));
const float32x4_t va0x3o = vreinterpretq_f32_u16(vandq_u16(va0x3, vmask));
const float32x4_t va1x3o = vreinterpretq_f32_u16(vandq_u16(va1x3, vmask));
vacc0x0 = vfmaq_f32(vacc0x0, va0x0o, vb0o);
vacc1x0 = vfmaq_f32(vacc1x0, va1x0o, vb0o);
vacc0x1 = vfmaq_f32(vacc0x1, va0x1o, vb1o);
vacc1x1 = vfmaq_f32(vacc1x1, va1x1o, vb1o);
vacc0x2 = vfmaq_f32(vacc0x2, va0x2o, vb2o);
vacc1x2 = vfmaq_f32(vacc1x2, va1x2o, vb2o);
vacc0x3 = vfmaq_f32(vacc0x3, va0x3o, vb3o);
vacc1x3 = vfmaq_f32(vacc1x3, va1x3o, vb3o);
}
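    // Reduce each row's four per-column accumulator vectors into one float32x4 holding
    // the 4 output columns: AArch64 uses 128-bit pairwise adds (vpaddq_f32), while
    // AArch32 falls back to 64-bit halves combined with vpadd_f32.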
#if XNN_ARCH_ARM64
const float32x4_t vacc0x01 = vpaddq_f32(vacc0x0, vacc0x1);
const float32x4_t vacc1x01 = vpaddq_f32(vacc1x0, vacc1x1);
const float32x4_t vacc0x23 = vpaddq_f32(vacc0x2, vacc0x3);
const float32x4_t vacc1x23 = vpaddq_f32(vacc1x2, vacc1x3);
float32x4_t vacc0x0123 = vpaddq_f32(vacc0x01, vacc0x23);
float32x4_t vacc1x0123 = vpaddq_f32(vacc1x01, vacc1x23);
#else
const float32x2_t vsum0x0 = vadd_f32(vget_low_f32(vacc0x0), vget_high_f32(vacc0x0));
const float32x2_t vsum1x0 = vadd_f32(vget_low_f32(vacc1x0), vget_high_f32(vacc1x0));
const float32x2_t vsum0x1 = vadd_f32(vget_low_f32(vacc0x1), vget_high_f32(vacc0x1));
const float32x2_t vsum1x1 = vadd_f32(vget_low_f32(vacc1x1), vget_high_f32(vacc1x1));
const float32x2_t vsum0x2 = vadd_f32(vget_low_f32(vacc0x2), vget_high_f32(vacc0x2));
const float32x2_t vsum1x2 = vadd_f32(vget_low_f32(vacc1x2), vget_high_f32(vacc1x2));
const float32x2_t vsum0x3 = vadd_f32(vget_low_f32(vacc0x3), vget_high_f32(vacc0x3));
const float32x2_t vsum1x3 = vadd_f32(vget_low_f32(vacc1x3), vget_high_f32(vacc1x3));
float32x4_t vacc0x0123 = vcombine_f32(vpadd_f32(vsum0x0, vsum0x1), vpadd_f32(vsum0x2, vsum0x3));
float32x4_t vacc1x0123 = vcombine_f32(vpadd_f32(vsum1x0, vsum1x1), vpadd_f32(vsum1x2, vsum1x3));
#endif
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
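    // Convert the f32 results to bf16 by keeping the upper 16 bits of each lane (truncation).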
uint16x4_t vout0x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc0x0123), 16);
uint16x4_t vout1x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc1x0123), 16);
if XNN_LIKELY(nc >= 4) {
vst1_u16(c0, vout0x0123);
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
vst1_u16(c1, vout1x0123);
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
nc -= 4;
} else {
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_u16(vout0x0123), 0); c0 += 2;
vst1_lane_u32((void*) c1, vreinterpret_u32_u16(vout1x0123), 0); c1 += 2;
vout0x0123 = vext_u16(vout0x0123, vout0x0123, 2);
vout1x0123 = vext_u16(vout1x0123, vout1x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c0, vout0x0123, 0);
vst1_lane_u16(c1, vout1x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,748 | 44.935897 | 109 | c |
| XNNPACK | XNNPACK-master/src/bf16-gemm/gen/bf16-gemm-2x4c8-minmax-neonfma-zip.c |
// Auto-generated file. Do not edit!
// Template: src/bf16-gemm/c8-neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_bf16_gemm_minmax_ukernel_2x4c8__neonfma_zip(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w_ptr,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w_ptr != NULL);
assert(c != NULL);
const uint16_t* a0 = (const uint16_t*) a;
uint16_t* c0 = (uint16_t*) c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* w = (const uint16_t*) w_ptr;
const uint16x8_t vzero = vmovq_n_u16(0);
do {
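    // Each accumulator starts from one bf16 bias value (the packed weight stream begins
    // each 4-column group with the biases), widened into lane 0; the remaining lanes
    // start at zero and are folded in by the final pairwise reduction.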
float32x4_t vacc0x0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc0x1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc0x2 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc0x3 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc1x0 = vacc0x0;
float32x4_t vacc1x1 = vacc0x1;
float32x4_t vacc1x2 = vacc0x2;
float32x4_t vacc1x3 = vacc0x3;
size_t k = kc;
for (; k >= 8 * sizeof(uint16_t); k -= 8 * sizeof(uint16_t)) {
const uint16x8_t va0h = vld1q_u16(a0); a0 += 8;
const uint16x8_t va1h = vld1q_u16(a1); a1 += 8;
const uint16x8_t vb0h = vld1q_u16(w); w += 8;
const uint16x8_t vb1h = vld1q_u16(w); w += 8;
const uint16x8_t vb2h = vld1q_u16(w); w += 8;
const uint16x8_t vb3h = vld1q_u16(w); w += 8;
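      // Widen bf16 to f32 by zipping a zero 16-bit lane below each element: on
      // little-endian ARM, vzipq_u16(vzero, x) leaves x[i] in the upper half of each
      // 32-bit lane, which is exactly the bit pattern of x[i] promoted to f32.
      // val[0] covers elements 0-3, val[1] covers elements 4-7.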
const uint16x8x2_t va0f = vzipq_u16(vzero, va0h);
const uint16x8x2_t va1f = vzipq_u16(vzero, va1h);
const uint16x8x2_t vb0f = vzipq_u16(vzero, vb0h);
const uint16x8x2_t vb1f = vzipq_u16(vzero, vb1h);
const uint16x8x2_t vb2f = vzipq_u16(vzero, vb2h);
const uint16x8x2_t vb3f = vzipq_u16(vzero, vb3h);
vacc0x0 = vfmaq_f32(vacc0x0, vreinterpretq_f32_u16(va0f.val[0]), vreinterpretq_f32_u16(vb0f.val[0]));
vacc1x0 = vfmaq_f32(vacc1x0, vreinterpretq_f32_u16(va1f.val[0]), vreinterpretq_f32_u16(vb0f.val[0]));
vacc0x1 = vfmaq_f32(vacc0x1, vreinterpretq_f32_u16(va0f.val[0]), vreinterpretq_f32_u16(vb1f.val[0]));
vacc1x1 = vfmaq_f32(vacc1x1, vreinterpretq_f32_u16(va1f.val[0]), vreinterpretq_f32_u16(vb1f.val[0]));
vacc0x2 = vfmaq_f32(vacc0x2, vreinterpretq_f32_u16(va0f.val[0]), vreinterpretq_f32_u16(vb2f.val[0]));
vacc1x2 = vfmaq_f32(vacc1x2, vreinterpretq_f32_u16(va1f.val[0]), vreinterpretq_f32_u16(vb2f.val[0]));
vacc0x3 = vfmaq_f32(vacc0x3, vreinterpretq_f32_u16(va0f.val[0]), vreinterpretq_f32_u16(vb3f.val[0]));
vacc1x3 = vfmaq_f32(vacc1x3, vreinterpretq_f32_u16(va1f.val[0]), vreinterpretq_f32_u16(vb3f.val[0]));
vacc0x0 = vfmaq_f32(vacc0x0, vreinterpretq_f32_u16(va0f.val[1]), vreinterpretq_f32_u16(vb0f.val[1]));
vacc1x0 = vfmaq_f32(vacc1x0, vreinterpretq_f32_u16(va1f.val[1]), vreinterpretq_f32_u16(vb0f.val[1]));
vacc0x1 = vfmaq_f32(vacc0x1, vreinterpretq_f32_u16(va0f.val[1]), vreinterpretq_f32_u16(vb1f.val[1]));
vacc1x1 = vfmaq_f32(vacc1x1, vreinterpretq_f32_u16(va1f.val[1]), vreinterpretq_f32_u16(vb1f.val[1]));
vacc0x2 = vfmaq_f32(vacc0x2, vreinterpretq_f32_u16(va0f.val[1]), vreinterpretq_f32_u16(vb2f.val[1]));
vacc1x2 = vfmaq_f32(vacc1x2, vreinterpretq_f32_u16(va1f.val[1]), vreinterpretq_f32_u16(vb2f.val[1]));
vacc0x3 = vfmaq_f32(vacc0x3, vreinterpretq_f32_u16(va0f.val[1]), vreinterpretq_f32_u16(vb3f.val[1]));
vacc1x3 = vfmaq_f32(vacc1x3, vreinterpretq_f32_u16(va1f.val[1]), vreinterpretq_f32_u16(vb3f.val[1]));
}
if XNN_UNLIKELY(k != 0) {
const uint16x8_t va0h = vld1q_u16(a0); a0 = (const uint16_t*) ((uintptr_t) a0 + k);
const uint16x8_t va1h = vld1q_u16(a1); a1 = (const uint16_t*) ((uintptr_t) a1 + k);
const uint16x8_t vb0h = vld1q_u16(w); w += 8;
const uint16x8_t vb1h = vld1q_u16(w); w += 8;
const uint16x8_t vb2h = vld1q_u16(w); w += 8;
const uint16x8_t vb3h = vld1q_u16(w); w += 8;
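      // Remainder (fewer than 8 bf16 elements left): full vectors were still loaded, but
      // the packed weights are zero-padded past kc, so lanes with zero weights are
      // detected (vceqq) and the matching activation lanes cleared (vbicq) to keep
      // out-of-bounds activation bits from polluting the accumulators.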
const uint16x8_t vm0h = vceqq_u16(vb0h, vmovq_n_u16(0));
const uint16x8_t vm1h = vceqq_u16(vb1h, vmovq_n_u16(0));
const uint16x8_t vm2h = vceqq_u16(vb2h, vmovq_n_u16(0));
const uint16x8_t vm3h = vceqq_u16(vb3h, vmovq_n_u16(0));
const uint16x8x2_t vb0f = vzipq_u16(vzero, vb0h);
const uint16x8x2_t vb1f = vzipq_u16(vzero, vb1h);
const uint16x8x2_t vb2f = vzipq_u16(vzero, vb2h);
const uint16x8x2_t vb3f = vzipq_u16(vzero, vb3h);
const uint16x8_t va0x0h = vbicq_u16(va0h, vm0h);
const uint16x8_t va1x0h = vbicq_u16(va1h, vm0h);
const uint16x8_t va0x1h = vbicq_u16(va0h, vm1h);
const uint16x8_t va1x1h = vbicq_u16(va1h, vm1h);
const uint16x8_t va0x2h = vbicq_u16(va0h, vm2h);
const uint16x8_t va1x2h = vbicq_u16(va1h, vm2h);
const uint16x8_t va0x3h = vbicq_u16(va0h, vm3h);
const uint16x8_t va1x3h = vbicq_u16(va1h, vm3h);
const uint16x8x2_t va0x0f = vzipq_u16(vzero, va0x0h);
const uint16x8x2_t va1x0f = vzipq_u16(vzero, va1x0h);
const uint16x8x2_t va0x1f = vzipq_u16(vzero, va0x1h);
const uint16x8x2_t va1x1f = vzipq_u16(vzero, va1x1h);
const uint16x8x2_t va0x2f = vzipq_u16(vzero, va0x2h);
const uint16x8x2_t va1x2f = vzipq_u16(vzero, va1x2h);
const uint16x8x2_t va0x3f = vzipq_u16(vzero, va0x3h);
const uint16x8x2_t va1x3f = vzipq_u16(vzero, va1x3h);
vacc0x0 = vfmaq_f32(vacc0x0, vreinterpretq_f32_u16(va0x0f.val[0]), vreinterpretq_f32_u16(vb0f.val[0]));
vacc1x0 = vfmaq_f32(vacc1x0, vreinterpretq_f32_u16(va1x0f.val[0]), vreinterpretq_f32_u16(vb0f.val[0]));
vacc0x1 = vfmaq_f32(vacc0x1, vreinterpretq_f32_u16(va0x1f.val[0]), vreinterpretq_f32_u16(vb1f.val[0]));
vacc1x1 = vfmaq_f32(vacc1x1, vreinterpretq_f32_u16(va1x1f.val[0]), vreinterpretq_f32_u16(vb1f.val[0]));
vacc0x2 = vfmaq_f32(vacc0x2, vreinterpretq_f32_u16(va0x2f.val[0]), vreinterpretq_f32_u16(vb2f.val[0]));
vacc1x2 = vfmaq_f32(vacc1x2, vreinterpretq_f32_u16(va1x2f.val[0]), vreinterpretq_f32_u16(vb2f.val[0]));
vacc0x3 = vfmaq_f32(vacc0x3, vreinterpretq_f32_u16(va0x3f.val[0]), vreinterpretq_f32_u16(vb3f.val[0]));
vacc1x3 = vfmaq_f32(vacc1x3, vreinterpretq_f32_u16(va1x3f.val[0]), vreinterpretq_f32_u16(vb3f.val[0]));
vacc0x0 = vfmaq_f32(vacc0x0, vreinterpretq_f32_u16(va0x0f.val[1]), vreinterpretq_f32_u16(vb0f.val[1]));
vacc1x0 = vfmaq_f32(vacc1x0, vreinterpretq_f32_u16(va1x0f.val[1]), vreinterpretq_f32_u16(vb0f.val[1]));
vacc0x1 = vfmaq_f32(vacc0x1, vreinterpretq_f32_u16(va0x1f.val[1]), vreinterpretq_f32_u16(vb1f.val[1]));
vacc1x1 = vfmaq_f32(vacc1x1, vreinterpretq_f32_u16(va1x1f.val[1]), vreinterpretq_f32_u16(vb1f.val[1]));
vacc0x2 = vfmaq_f32(vacc0x2, vreinterpretq_f32_u16(va0x2f.val[1]), vreinterpretq_f32_u16(vb2f.val[1]));
vacc1x2 = vfmaq_f32(vacc1x2, vreinterpretq_f32_u16(va1x2f.val[1]), vreinterpretq_f32_u16(vb2f.val[1]));
vacc0x3 = vfmaq_f32(vacc0x3, vreinterpretq_f32_u16(va0x3f.val[1]), vreinterpretq_f32_u16(vb3f.val[1]));
vacc1x3 = vfmaq_f32(vacc1x3, vreinterpretq_f32_u16(va1x3f.val[1]), vreinterpretq_f32_u16(vb3f.val[1]));
}
#if XNN_ARCH_ARM64
const float32x4_t vacc0x01 = vpaddq_f32(vacc0x0, vacc0x1);
const float32x4_t vacc1x01 = vpaddq_f32(vacc1x0, vacc1x1);
const float32x4_t vacc0x23 = vpaddq_f32(vacc0x2, vacc0x3);
const float32x4_t vacc1x23 = vpaddq_f32(vacc1x2, vacc1x3);
float32x4_t vacc0x0123 = vpaddq_f32(vacc0x01, vacc0x23);
float32x4_t vacc1x0123 = vpaddq_f32(vacc1x01, vacc1x23);
#else
const float32x2_t vsum0x0 = vadd_f32(vget_low_f32(vacc0x0), vget_high_f32(vacc0x0));
const float32x2_t vsum1x0 = vadd_f32(vget_low_f32(vacc1x0), vget_high_f32(vacc1x0));
const float32x2_t vsum0x1 = vadd_f32(vget_low_f32(vacc0x1), vget_high_f32(vacc0x1));
const float32x2_t vsum1x1 = vadd_f32(vget_low_f32(vacc1x1), vget_high_f32(vacc1x1));
const float32x2_t vsum0x2 = vadd_f32(vget_low_f32(vacc0x2), vget_high_f32(vacc0x2));
const float32x2_t vsum1x2 = vadd_f32(vget_low_f32(vacc1x2), vget_high_f32(vacc1x2));
const float32x2_t vsum0x3 = vadd_f32(vget_low_f32(vacc0x3), vget_high_f32(vacc0x3));
const float32x2_t vsum1x3 = vadd_f32(vget_low_f32(vacc1x3), vget_high_f32(vacc1x3));
float32x4_t vacc0x0123 = vcombine_f32(vpadd_f32(vsum0x0, vsum0x1), vpadd_f32(vsum0x2, vsum0x3));
float32x4_t vacc1x0123 = vcombine_f32(vpadd_f32(vsum1x0, vsum1x1), vpadd_f32(vsum1x2, vsum1x3));
#endif
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
uint16x4_t vout0x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc0x0123), 16);
uint16x4_t vout1x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc1x0123), 16);
if XNN_LIKELY(nc >= 4) {
vst1_u16(c0, vout0x0123);
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
vst1_u16(c1, vout1x0123);
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
nc -= 4;
} else {
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_u16(vout0x0123), 0); c0 += 2;
vst1_lane_u32((void*) c1, vreinterpret_u32_u16(vout1x0123), 0); c1 += 2;
vout0x0123 = vext_u16(vout0x0123, vout0x0123, 2);
vout1x0123 = vext_u16(vout1x0123, vout1x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c0, vout0x0123, 0);
vst1_lane_u16(c1, vout1x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,468 | 48.382075 | 109 | c |
| XNNPACK | XNNPACK-master/src/bf16-gemm/gen/bf16-gemm-3x4c8-minmax-neonbf16-bfdot.c |
// Auto-generated file. Do not edit!
// Template: src/bf16-gemm/c8-neonbf16.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_bf16_gemm_minmax_ukernel_3x4c8__neonbf16_bfdot(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w_ptr,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(bfloat16_t) == 0);
assert(a != NULL);
assert(w_ptr != NULL);
assert(c != NULL);
const bfloat16_t* a0 = (const bfloat16_t*) a;
bfloat16_t* c0 = (bfloat16_t*) c;
const bfloat16_t* a1 = (const bfloat16_t*) ((uintptr_t) a0 + a_stride);
bfloat16_t* c1 = (bfloat16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
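  // Rows past mr alias the previous row, so the full register tile can be processed
  // unconditionally without branching on mr inside the hot loop.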
const bfloat16_t* a2 = (const bfloat16_t*) ((uintptr_t) a1 + a_stride);
bfloat16_t* c2 = (bfloat16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const bfloat16_t* w = (const bfloat16_t*) w_ptr;
do {
float32x4_t vacc0x0 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc0x1 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc0x2 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc0x3 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc1x0 = vacc0x0;
float32x4_t vacc1x1 = vacc0x1;
float32x4_t vacc1x2 = vacc0x2;
float32x4_t vacc1x3 = vacc0x3;
float32x4_t vacc2x0 = vacc0x0;
float32x4_t vacc2x1 = vacc0x1;
float32x4_t vacc2x2 = vacc0x2;
float32x4_t vacc2x3 = vacc0x3;
size_t k = kc;
for (; k >= 8 * sizeof(bfloat16_t); k -= 8 * sizeof(bfloat16_t)) {
const bfloat16x8_t va0 = vld1q_bf16(a0); a0 += 8;
const bfloat16x8_t va1 = vld1q_bf16(a1); a1 += 8;
const bfloat16x8_t va2 = vld1q_bf16(a2); a2 += 8;
const bfloat16x8_t vb0 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb1 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb2 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb3 = vld1q_bf16(w); w += 8;
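      // vbfdotq_f32 accumulates a 2-element dot product into each 32-bit lane:
      // acc[i] += a[2*i]*b[2*i] + a[2*i+1]*b[2*i+1], with the products formed from
      // bf16 inputs.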
vacc0x0 = vbfdotq_f32(vacc0x0, va0, vb0);
vacc1x0 = vbfdotq_f32(vacc1x0, va1, vb0);
vacc2x0 = vbfdotq_f32(vacc2x0, va2, vb0);
vacc0x1 = vbfdotq_f32(vacc0x1, va0, vb1);
vacc1x1 = vbfdotq_f32(vacc1x1, va1, vb1);
vacc2x1 = vbfdotq_f32(vacc2x1, va2, vb1);
vacc0x2 = vbfdotq_f32(vacc0x2, va0, vb2);
vacc1x2 = vbfdotq_f32(vacc1x2, va1, vb2);
vacc2x2 = vbfdotq_f32(vacc2x2, va2, vb2);
vacc0x3 = vbfdotq_f32(vacc0x3, va0, vb3);
vacc1x3 = vbfdotq_f32(vacc1x3, va1, vb3);
vacc2x3 = vbfdotq_f32(vacc2x3, va2, vb3);
}
if XNN_UNLIKELY(k != 0) {
const bfloat16x8_t va0 = vld1q_bf16(a0); a0 = (const bfloat16_t*) ((uintptr_t) a0 + k);
const bfloat16x8_t va1 = vld1q_bf16(a1); a1 = (const bfloat16_t*) ((uintptr_t) a1 + k);
const bfloat16x8_t va2 = vld1q_bf16(a2); a2 = (const bfloat16_t*) ((uintptr_t) a2 + k);
const bfloat16x8_t vb0 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb1 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb2 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb3 = vld1q_bf16(w); w += 8;
const uint16x8_t vm0 = vceqq_u16(vreinterpretq_u16_bf16(vb0), vmovq_n_u16(0));
const uint16x8_t vm1 = vceqq_u16(vreinterpretq_u16_bf16(vb1), vmovq_n_u16(0));
const uint16x8_t vm2 = vceqq_u16(vreinterpretq_u16_bf16(vb2), vmovq_n_u16(0));
const uint16x8_t vm3 = vceqq_u16(vreinterpretq_u16_bf16(vb3), vmovq_n_u16(0));
const bfloat16x8_t va0x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm0));
vacc0x0 = vbfdotq_f32(vacc0x0, va0x0, vb0);
const bfloat16x8_t va1x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm0));
vacc1x0 = vbfdotq_f32(vacc1x0, va1x0, vb0);
const bfloat16x8_t va2x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm0));
vacc2x0 = vbfdotq_f32(vacc2x0, va2x0, vb0);
const bfloat16x8_t va0x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm1));
vacc0x1 = vbfdotq_f32(vacc0x1, va0x1, vb1);
const bfloat16x8_t va1x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm1));
vacc1x1 = vbfdotq_f32(vacc1x1, va1x1, vb1);
const bfloat16x8_t va2x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm1));
vacc2x1 = vbfdotq_f32(vacc2x1, va2x1, vb1);
const bfloat16x8_t va0x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm2));
vacc0x2 = vbfdotq_f32(vacc0x2, va0x2, vb2);
const bfloat16x8_t va1x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm2));
vacc1x2 = vbfdotq_f32(vacc1x2, va1x2, vb2);
const bfloat16x8_t va2x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm2));
vacc2x2 = vbfdotq_f32(vacc2x2, va2x2, vb2);
const bfloat16x8_t va0x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm3));
vacc0x3 = vbfdotq_f32(vacc0x3, va0x3, vb3);
const bfloat16x8_t va1x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm3));
vacc1x3 = vbfdotq_f32(vacc1x3, va1x3, vb3);
const bfloat16x8_t va2x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm3));
vacc2x3 = vbfdotq_f32(vacc2x3, va2x3, vb3);
}
#if XNN_ARCH_ARM64
const float32x4_t vacc0x01 = vpaddq_f32(vacc0x0, vacc0x1);
const float32x4_t vacc1x01 = vpaddq_f32(vacc1x0, vacc1x1);
const float32x4_t vacc2x01 = vpaddq_f32(vacc2x0, vacc2x1);
const float32x4_t vacc0x23 = vpaddq_f32(vacc0x2, vacc0x3);
const float32x4_t vacc1x23 = vpaddq_f32(vacc1x2, vacc1x3);
const float32x4_t vacc2x23 = vpaddq_f32(vacc2x2, vacc2x3);
float32x4_t vacc0x0123 = vpaddq_f32(vacc0x01, vacc0x23);
float32x4_t vacc1x0123 = vpaddq_f32(vacc1x01, vacc1x23);
float32x4_t vacc2x0123 = vpaddq_f32(vacc2x01, vacc2x23);
#else
const float32x2_t vsum0x0 = vadd_f32(vget_low_f32(vacc0x0), vget_high_f32(vacc0x0));
const float32x2_t vsum1x0 = vadd_f32(vget_low_f32(vacc1x0), vget_high_f32(vacc1x0));
const float32x2_t vsum2x0 = vadd_f32(vget_low_f32(vacc2x0), vget_high_f32(vacc2x0));
const float32x2_t vsum0x1 = vadd_f32(vget_low_f32(vacc0x1), vget_high_f32(vacc0x1));
const float32x2_t vsum1x1 = vadd_f32(vget_low_f32(vacc1x1), vget_high_f32(vacc1x1));
const float32x2_t vsum2x1 = vadd_f32(vget_low_f32(vacc2x1), vget_high_f32(vacc2x1));
const float32x2_t vsum0x2 = vadd_f32(vget_low_f32(vacc0x2), vget_high_f32(vacc0x2));
const float32x2_t vsum1x2 = vadd_f32(vget_low_f32(vacc1x2), vget_high_f32(vacc1x2));
const float32x2_t vsum2x2 = vadd_f32(vget_low_f32(vacc2x2), vget_high_f32(vacc2x2));
const float32x2_t vsum0x3 = vadd_f32(vget_low_f32(vacc0x3), vget_high_f32(vacc0x3));
const float32x2_t vsum1x3 = vadd_f32(vget_low_f32(vacc1x3), vget_high_f32(vacc1x3));
const float32x2_t vsum2x3 = vadd_f32(vget_low_f32(vacc2x3), vget_high_f32(vacc2x3));
float32x4_t vacc0x0123 = vcombine_f32(vpadd_f32(vsum0x0, vsum0x1), vpadd_f32(vsum0x2, vsum0x3));
float32x4_t vacc1x0123 = vcombine_f32(vpadd_f32(vsum1x0, vsum1x1), vpadd_f32(vsum1x2, vsum1x3));
float32x4_t vacc2x0123 = vcombine_f32(vpadd_f32(vsum2x0, vsum2x1), vpadd_f32(vsum2x2, vsum2x3));
#endif
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
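    // vcvt_bf16_f32 narrows each f32 accumulator to bf16 in hardware (unlike the
    // bit-shift truncation used by the neonfma variants).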
bfloat16x4_t vout0x0123 = vcvt_bf16_f32(vacc0x0123);
bfloat16x4_t vout1x0123 = vcvt_bf16_f32(vacc1x0123);
bfloat16x4_t vout2x0123 = vcvt_bf16_f32(vacc2x0123);
if XNN_LIKELY(nc >= 4) {
vst1_bf16(c0, vout0x0123);
c0 = (bfloat16_t*) ((uintptr_t) c0 + cn_stride);
vst1_bf16(c1, vout1x0123);
c1 = (bfloat16_t*) ((uintptr_t) c1 + cn_stride);
vst1_bf16(c2, vout2x0123);
c2 = (bfloat16_t*) ((uintptr_t) c2 + cn_stride);
a0 = (const bfloat16_t*) ((uintptr_t) a0 - kc);
a1 = (const bfloat16_t*) ((uintptr_t) a1 - kc);
a2 = (const bfloat16_t*) ((uintptr_t) a2 - kc);
nc -= 4;
} else {
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_bf16(vout0x0123), 0); c0 += 2;
vst1_lane_u32((void*) c1, vreinterpret_u32_bf16(vout1x0123), 0); c1 += 2;
vst1_lane_u32((void*) c2, vreinterpret_u32_bf16(vout2x0123), 0); c2 += 2;
vout0x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout0x0123), vreinterpret_u16_bf16(vout0x0123), 2));
vout1x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout1x0123), vreinterpret_u16_bf16(vout1x0123), 2));
vout2x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout2x0123), vreinterpret_u16_bf16(vout2x0123), 2));
}
if (nc & 1) {
vst1_lane_bf16(c0, vout0x0123, 0);
vst1_lane_bf16(c1, vout1x0123, 0);
vst1_lane_bf16(c2, vout2x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,774 | 45.327014 | 126 | c |
| XNNPACK | XNNPACK-master/src/bf16-gemm/gen/bf16-gemm-3x4c8-minmax-neonbf16-bfmlal.c |
// Auto-generated file. Do not edit!
// Template: src/bf16-gemm/c8-neonbf16.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_bf16_gemm_minmax_ukernel_3x4c8__neonbf16_bfmlal(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w_ptr,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(bfloat16_t) == 0);
assert(a != NULL);
assert(w_ptr != NULL);
assert(c != NULL);
const bfloat16_t* a0 = (const bfloat16_t*) a;
bfloat16_t* c0 = (bfloat16_t*) c;
const bfloat16_t* a1 = (const bfloat16_t*) ((uintptr_t) a0 + a_stride);
bfloat16_t* c1 = (bfloat16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const bfloat16_t* a2 = (const bfloat16_t*) ((uintptr_t) a1 + a_stride);
bfloat16_t* c2 = (bfloat16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const bfloat16_t* w = (const bfloat16_t*) w_ptr;
do {
float32x4_t vacc0x0 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc0x1 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc0x2 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc0x3 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc1x0 = vacc0x0;
float32x4_t vacc1x1 = vacc0x1;
float32x4_t vacc1x2 = vacc0x2;
float32x4_t vacc1x3 = vacc0x3;
float32x4_t vacc2x0 = vacc0x0;
float32x4_t vacc2x1 = vacc0x1;
float32x4_t vacc2x2 = vacc0x2;
float32x4_t vacc2x3 = vacc0x3;
size_t k = kc;
for (; k >= 8 * sizeof(bfloat16_t); k -= 8 * sizeof(bfloat16_t)) {
const bfloat16x8_t va0 = vld1q_bf16(a0); a0 += 8;
const bfloat16x8_t va1 = vld1q_bf16(a1); a1 += 8;
const bfloat16x8_t va2 = vld1q_bf16(a2); a2 += 8;
const bfloat16x8_t vb0 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb1 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb2 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb3 = vld1q_bf16(w); w += 8;
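      // vbfmlalbq_f32 / vbfmlaltq_f32 widen the even- ("bottom") and odd-indexed ("top")
      // bf16 lanes to f32 and fuse multiply-accumulate them, so each 8-element block
      // takes two passes over the same a/b vectors.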
vacc0x0 = vbfmlalbq_f32(vacc0x0, va0, vb0);
vacc1x0 = vbfmlalbq_f32(vacc1x0, va1, vb0);
vacc2x0 = vbfmlalbq_f32(vacc2x0, va2, vb0);
vacc0x1 = vbfmlalbq_f32(vacc0x1, va0, vb1);
vacc1x1 = vbfmlalbq_f32(vacc1x1, va1, vb1);
vacc2x1 = vbfmlalbq_f32(vacc2x1, va2, vb1);
vacc0x2 = vbfmlalbq_f32(vacc0x2, va0, vb2);
vacc1x2 = vbfmlalbq_f32(vacc1x2, va1, vb2);
vacc2x2 = vbfmlalbq_f32(vacc2x2, va2, vb2);
vacc0x3 = vbfmlalbq_f32(vacc0x3, va0, vb3);
vacc1x3 = vbfmlalbq_f32(vacc1x3, va1, vb3);
vacc2x3 = vbfmlalbq_f32(vacc2x3, va2, vb3);
vacc0x0 = vbfmlaltq_f32(vacc0x0, va0, vb0);
vacc1x0 = vbfmlaltq_f32(vacc1x0, va1, vb0);
vacc2x0 = vbfmlaltq_f32(vacc2x0, va2, vb0);
vacc0x1 = vbfmlaltq_f32(vacc0x1, va0, vb1);
vacc1x1 = vbfmlaltq_f32(vacc1x1, va1, vb1);
vacc2x1 = vbfmlaltq_f32(vacc2x1, va2, vb1);
vacc0x2 = vbfmlaltq_f32(vacc0x2, va0, vb2);
vacc1x2 = vbfmlaltq_f32(vacc1x2, va1, vb2);
vacc2x2 = vbfmlaltq_f32(vacc2x2, va2, vb2);
vacc0x3 = vbfmlaltq_f32(vacc0x3, va0, vb3);
vacc1x3 = vbfmlaltq_f32(vacc1x3, va1, vb3);
vacc2x3 = vbfmlaltq_f32(vacc2x3, va2, vb3);
}
if XNN_UNLIKELY(k != 0) {
const bfloat16x8_t va0 = vld1q_bf16(a0); a0 = (const bfloat16_t*) ((uintptr_t) a0 + k);
const bfloat16x8_t va1 = vld1q_bf16(a1); a1 = (const bfloat16_t*) ((uintptr_t) a1 + k);
const bfloat16x8_t va2 = vld1q_bf16(a2); a2 = (const bfloat16_t*) ((uintptr_t) a2 + k);
const bfloat16x8_t vb0 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb1 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb2 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb3 = vld1q_bf16(w); w += 8;
const uint16x8_t vm0 = vceqq_u16(vreinterpretq_u16_bf16(vb0), vmovq_n_u16(0));
const uint16x8_t vm1 = vceqq_u16(vreinterpretq_u16_bf16(vb1), vmovq_n_u16(0));
const uint16x8_t vm2 = vceqq_u16(vreinterpretq_u16_bf16(vb2), vmovq_n_u16(0));
const uint16x8_t vm3 = vceqq_u16(vreinterpretq_u16_bf16(vb3), vmovq_n_u16(0));
const bfloat16x8_t va0x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm0));
vacc0x0 = vbfmlalbq_f32(vacc0x0, va0x0, vb0);
vacc0x0 = vbfmlaltq_f32(vacc0x0, va0x0, vb0);
const bfloat16x8_t va1x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm0));
vacc1x0 = vbfmlalbq_f32(vacc1x0, va1x0, vb0);
vacc1x0 = vbfmlaltq_f32(vacc1x0, va1x0, vb0);
const bfloat16x8_t va2x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm0));
vacc2x0 = vbfmlalbq_f32(vacc2x0, va2x0, vb0);
vacc2x0 = vbfmlaltq_f32(vacc2x0, va2x0, vb0);
const bfloat16x8_t va0x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm1));
vacc0x1 = vbfmlalbq_f32(vacc0x1, va0x1, vb1);
vacc0x1 = vbfmlaltq_f32(vacc0x1, va0x1, vb1);
const bfloat16x8_t va1x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm1));
vacc1x1 = vbfmlalbq_f32(vacc1x1, va1x1, vb1);
vacc1x1 = vbfmlaltq_f32(vacc1x1, va1x1, vb1);
const bfloat16x8_t va2x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm1));
vacc2x1 = vbfmlalbq_f32(vacc2x1, va2x1, vb1);
vacc2x1 = vbfmlaltq_f32(vacc2x1, va2x1, vb1);
const bfloat16x8_t va0x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm2));
vacc0x2 = vbfmlalbq_f32(vacc0x2, va0x2, vb2);
vacc0x2 = vbfmlaltq_f32(vacc0x2, va0x2, vb2);
const bfloat16x8_t va1x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm2));
vacc1x2 = vbfmlalbq_f32(vacc1x2, va1x2, vb2);
vacc1x2 = vbfmlaltq_f32(vacc1x2, va1x2, vb2);
const bfloat16x8_t va2x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm2));
vacc2x2 = vbfmlalbq_f32(vacc2x2, va2x2, vb2);
vacc2x2 = vbfmlaltq_f32(vacc2x2, va2x2, vb2);
const bfloat16x8_t va0x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm3));
vacc0x3 = vbfmlalbq_f32(vacc0x3, va0x3, vb3);
vacc0x3 = vbfmlaltq_f32(vacc0x3, va0x3, vb3);
const bfloat16x8_t va1x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm3));
vacc1x3 = vbfmlalbq_f32(vacc1x3, va1x3, vb3);
vacc1x3 = vbfmlaltq_f32(vacc1x3, va1x3, vb3);
const bfloat16x8_t va2x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm3));
vacc2x3 = vbfmlalbq_f32(vacc2x3, va2x3, vb3);
vacc2x3 = vbfmlaltq_f32(vacc2x3, va2x3, vb3);
}
#if XNN_ARCH_ARM64
const float32x4_t vacc0x01 = vpaddq_f32(vacc0x0, vacc0x1);
const float32x4_t vacc1x01 = vpaddq_f32(vacc1x0, vacc1x1);
const float32x4_t vacc2x01 = vpaddq_f32(vacc2x0, vacc2x1);
const float32x4_t vacc0x23 = vpaddq_f32(vacc0x2, vacc0x3);
const float32x4_t vacc1x23 = vpaddq_f32(vacc1x2, vacc1x3);
const float32x4_t vacc2x23 = vpaddq_f32(vacc2x2, vacc2x3);
float32x4_t vacc0x0123 = vpaddq_f32(vacc0x01, vacc0x23);
float32x4_t vacc1x0123 = vpaddq_f32(vacc1x01, vacc1x23);
float32x4_t vacc2x0123 = vpaddq_f32(vacc2x01, vacc2x23);
#else
const float32x2_t vsum0x0 = vadd_f32(vget_low_f32(vacc0x0), vget_high_f32(vacc0x0));
const float32x2_t vsum1x0 = vadd_f32(vget_low_f32(vacc1x0), vget_high_f32(vacc1x0));
const float32x2_t vsum2x0 = vadd_f32(vget_low_f32(vacc2x0), vget_high_f32(vacc2x0));
const float32x2_t vsum0x1 = vadd_f32(vget_low_f32(vacc0x1), vget_high_f32(vacc0x1));
const float32x2_t vsum1x1 = vadd_f32(vget_low_f32(vacc1x1), vget_high_f32(vacc1x1));
const float32x2_t vsum2x1 = vadd_f32(vget_low_f32(vacc2x1), vget_high_f32(vacc2x1));
const float32x2_t vsum0x2 = vadd_f32(vget_low_f32(vacc0x2), vget_high_f32(vacc0x2));
const float32x2_t vsum1x2 = vadd_f32(vget_low_f32(vacc1x2), vget_high_f32(vacc1x2));
const float32x2_t vsum2x2 = vadd_f32(vget_low_f32(vacc2x2), vget_high_f32(vacc2x2));
const float32x2_t vsum0x3 = vadd_f32(vget_low_f32(vacc0x3), vget_high_f32(vacc0x3));
const float32x2_t vsum1x3 = vadd_f32(vget_low_f32(vacc1x3), vget_high_f32(vacc1x3));
const float32x2_t vsum2x3 = vadd_f32(vget_low_f32(vacc2x3), vget_high_f32(vacc2x3));
float32x4_t vacc0x0123 = vcombine_f32(vpadd_f32(vsum0x0, vsum0x1), vpadd_f32(vsum0x2, vsum0x3));
float32x4_t vacc1x0123 = vcombine_f32(vpadd_f32(vsum1x0, vsum1x1), vpadd_f32(vsum1x2, vsum1x3));
float32x4_t vacc2x0123 = vcombine_f32(vpadd_f32(vsum2x0, vsum2x1), vpadd_f32(vsum2x2, vsum2x3));
#endif
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
bfloat16x4_t vout0x0123 = vcvt_bf16_f32(vacc0x0123);
bfloat16x4_t vout1x0123 = vcvt_bf16_f32(vacc1x0123);
bfloat16x4_t vout2x0123 = vcvt_bf16_f32(vacc2x0123);
if XNN_LIKELY(nc >= 4) {
vst1_bf16(c0, vout0x0123);
c0 = (bfloat16_t*) ((uintptr_t) c0 + cn_stride);
vst1_bf16(c1, vout1x0123);
c1 = (bfloat16_t*) ((uintptr_t) c1 + cn_stride);
vst1_bf16(c2, vout2x0123);
c2 = (bfloat16_t*) ((uintptr_t) c2 + cn_stride);
a0 = (const bfloat16_t*) ((uintptr_t) a0 - kc);
a1 = (const bfloat16_t*) ((uintptr_t) a1 - kc);
a2 = (const bfloat16_t*) ((uintptr_t) a2 - kc);
nc -= 4;
} else {
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_bf16(vout0x0123), 0); c0 += 2;
vst1_lane_u32((void*) c1, vreinterpret_u32_bf16(vout1x0123), 0); c1 += 2;
vst1_lane_u32((void*) c2, vreinterpret_u32_bf16(vout2x0123), 0); c2 += 2;
vout0x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout0x0123), vreinterpret_u16_bf16(vout0x0123), 2));
vout1x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout1x0123), vreinterpret_u16_bf16(vout1x0123), 2));
vout2x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout2x0123), vreinterpret_u16_bf16(vout2x0123), 2));
}
if (nc & 1) {
vst1_lane_bf16(c0, vout0x0123, 0);
vst1_lane_bf16(c1, vout1x0123, 0);
vst1_lane_bf16(c2, vout2x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,048 | 45.817797 | 126 | c |
| XNNPACK | XNNPACK-master/src/bf16-gemm/gen/bf16-gemm-3x4c8-minmax-neonfma-shland.c |
// Auto-generated file. Do not edit!
// Template: src/bf16-gemm/c8-neon-shland.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_bf16_gemm_minmax_ukernel_3x4c8__neonfma_shland(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w_ptr,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w_ptr != NULL);
assert(c != NULL);
const uint16_t* a0 = (const uint16_t*) a;
uint16_t* c0 = (uint16_t*) c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const uint16_t* w = (const uint16_t*) w_ptr;
const uint16x8_t vmask = vreinterpretq_u16_u32(vmovq_n_u32(UINT32_C(0xFFFF0000)));
do {
float32x4_t vacc0x0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc0x1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc0x2 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc0x3 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc1x0 = vacc0x0;
float32x4_t vacc1x1 = vacc0x1;
float32x4_t vacc1x2 = vacc0x2;
float32x4_t vacc1x3 = vacc0x3;
float32x4_t vacc2x0 = vacc0x0;
float32x4_t vacc2x1 = vacc0x1;
float32x4_t vacc2x2 = vacc0x2;
float32x4_t vacc2x3 = vacc0x3;
size_t k = kc;
for (; k >= 8 * sizeof(uint16_t); k -= 8 * sizeof(uint16_t)) {
const uint16x8_t va0 = vld1q_u16(a0); a0 += 8;
const uint16x8_t va1 = vld1q_u16(a1); a1 += 8;
const uint16x8_t va2 = vld1q_u16(a2); a2 += 8;
const uint16x8_t vb0 = vld1q_u16(w); w += 8;
const uint16x8_t vb1 = vld1q_u16(w); w += 8;
const uint16x8_t vb2 = vld1q_u16(w); w += 8;
const uint16x8_t vb3 = vld1q_u16(w); w += 8;
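      // Two passes per 8-element block: the "e" pass shifts the even-indexed bf16
      // elements into the upper half of each 32-bit lane (their f32 bit pattern), and
      // the "o" pass masks with 0xFFFF0000 to keep the odd-indexed elements that
      // already sit in the upper half.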
const float32x4_t va0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va0), 16));
const float32x4_t va1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va1), 16));
const float32x4_t va2e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va2), 16));
const float32x4_t vb0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb0), 16));
const float32x4_t vb1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb1), 16));
const float32x4_t vb2e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb2), 16));
const float32x4_t vb3e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb3), 16));
vacc0x0 = vfmaq_f32(vacc0x0, va0e, vb0e);
vacc1x0 = vfmaq_f32(vacc1x0, va1e, vb0e);
vacc2x0 = vfmaq_f32(vacc2x0, va2e, vb0e);
vacc0x1 = vfmaq_f32(vacc0x1, va0e, vb1e);
vacc1x1 = vfmaq_f32(vacc1x1, va1e, vb1e);
vacc2x1 = vfmaq_f32(vacc2x1, va2e, vb1e);
vacc0x2 = vfmaq_f32(vacc0x2, va0e, vb2e);
vacc1x2 = vfmaq_f32(vacc1x2, va1e, vb2e);
vacc2x2 = vfmaq_f32(vacc2x2, va2e, vb2e);
vacc0x3 = vfmaq_f32(vacc0x3, va0e, vb3e);
vacc1x3 = vfmaq_f32(vacc1x3, va1e, vb3e);
vacc2x3 = vfmaq_f32(vacc2x3, va2e, vb3e);
const float32x4_t va0o = vreinterpretq_f32_u16(vandq_u16(va0, vmask));
const float32x4_t va1o = vreinterpretq_f32_u16(vandq_u16(va1, vmask));
const float32x4_t va2o = vreinterpretq_f32_u16(vandq_u16(va2, vmask));
const float32x4_t vb0o = vreinterpretq_f32_u16(vandq_u16(vb0, vmask));
const float32x4_t vb1o = vreinterpretq_f32_u16(vandq_u16(vb1, vmask));
const float32x4_t vb2o = vreinterpretq_f32_u16(vandq_u16(vb2, vmask));
const float32x4_t vb3o = vreinterpretq_f32_u16(vandq_u16(vb3, vmask));
vacc0x0 = vfmaq_f32(vacc0x0, va0o, vb0o);
vacc1x0 = vfmaq_f32(vacc1x0, va1o, vb0o);
vacc2x0 = vfmaq_f32(vacc2x0, va2o, vb0o);
vacc0x1 = vfmaq_f32(vacc0x1, va0o, vb1o);
vacc1x1 = vfmaq_f32(vacc1x1, va1o, vb1o);
vacc2x1 = vfmaq_f32(vacc2x1, va2o, vb1o);
vacc0x2 = vfmaq_f32(vacc0x2, va0o, vb2o);
vacc1x2 = vfmaq_f32(vacc1x2, va1o, vb2o);
vacc2x2 = vfmaq_f32(vacc2x2, va2o, vb2o);
vacc0x3 = vfmaq_f32(vacc0x3, va0o, vb3o);
vacc1x3 = vfmaq_f32(vacc1x3, va1o, vb3o);
vacc2x3 = vfmaq_f32(vacc2x3, va2o, vb3o);
}
if XNN_UNLIKELY(k != 0) {
const uint16x8_t va0 = vld1q_u16(a0); a0 = (const uint16_t*) ((uintptr_t) a0 + k);
const uint16x8_t va1 = vld1q_u16(a1); a1 = (const uint16_t*) ((uintptr_t) a1 + k);
const uint16x8_t va2 = vld1q_u16(a2); a2 = (const uint16_t*) ((uintptr_t) a2 + k);
const uint16x8_t vb0 = vld1q_u16(w); w += 8;
const uint16x8_t vb1 = vld1q_u16(w); w += 8;
const uint16x8_t vb2 = vld1q_u16(w); w += 8;
const uint16x8_t vb3 = vld1q_u16(w); w += 8;
const uint16x8_t vm0 = vceqq_u16(vb0, vmovq_n_u16(0));
const uint16x8_t vm1 = vceqq_u16(vb1, vmovq_n_u16(0));
const uint16x8_t vm2 = vceqq_u16(vb2, vmovq_n_u16(0));
const uint16x8_t vm3 = vceqq_u16(vb3, vmovq_n_u16(0));
const float32x4_t vb0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb0), 16));
const float32x4_t vb1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb1), 16));
const float32x4_t vb2e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb2), 16));
const float32x4_t vb3e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb3), 16));
const uint16x8_t va0x0 = vbicq_u16(va0, vm0);
const uint16x8_t va1x0 = vbicq_u16(va1, vm0);
const uint16x8_t va2x0 = vbicq_u16(va2, vm0);
const uint16x8_t va0x1 = vbicq_u16(va0, vm1);
const uint16x8_t va1x1 = vbicq_u16(va1, vm1);
const uint16x8_t va2x1 = vbicq_u16(va2, vm1);
const uint16x8_t va0x2 = vbicq_u16(va0, vm2);
const uint16x8_t va1x2 = vbicq_u16(va1, vm2);
const uint16x8_t va2x2 = vbicq_u16(va2, vm2);
const uint16x8_t va0x3 = vbicq_u16(va0, vm3);
const uint16x8_t va1x3 = vbicq_u16(va1, vm3);
const uint16x8_t va2x3 = vbicq_u16(va2, vm3);
const float32x4_t va0x0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va0x0), 16));
const float32x4_t va1x0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va1x0), 16));
const float32x4_t va2x0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va2x0), 16));
const float32x4_t va0x1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va0x1), 16));
const float32x4_t va1x1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va1x1), 16));
const float32x4_t va2x1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va2x1), 16));
const float32x4_t va0x2e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va0x2), 16));
const float32x4_t va1x2e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va1x2), 16));
const float32x4_t va2x2e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va2x2), 16));
const float32x4_t va0x3e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va0x3), 16));
const float32x4_t va1x3e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va1x3), 16));
const float32x4_t va2x3e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va2x3), 16));
vacc0x0 = vfmaq_f32(vacc0x0, va0x0e, vb0e);
vacc1x0 = vfmaq_f32(vacc1x0, va1x0e, vb0e);
vacc2x0 = vfmaq_f32(vacc2x0, va2x0e, vb0e);
vacc0x1 = vfmaq_f32(vacc0x1, va0x1e, vb1e);
vacc1x1 = vfmaq_f32(vacc1x1, va1x1e, vb1e);
vacc2x1 = vfmaq_f32(vacc2x1, va2x1e, vb1e);
vacc0x2 = vfmaq_f32(vacc0x2, va0x2e, vb2e);
vacc1x2 = vfmaq_f32(vacc1x2, va1x2e, vb2e);
vacc2x2 = vfmaq_f32(vacc2x2, va2x2e, vb2e);
vacc0x3 = vfmaq_f32(vacc0x3, va0x3e, vb3e);
vacc1x3 = vfmaq_f32(vacc1x3, va1x3e, vb3e);
vacc2x3 = vfmaq_f32(vacc2x3, va2x3e, vb3e);
const float32x4_t vb0o = vreinterpretq_f32_u16(vandq_u16(vb0, vmask));
const float32x4_t vb1o = vreinterpretq_f32_u16(vandq_u16(vb1, vmask));
const float32x4_t vb2o = vreinterpretq_f32_u16(vandq_u16(vb2, vmask));
const float32x4_t vb3o = vreinterpretq_f32_u16(vandq_u16(vb3, vmask));
const float32x4_t va0x0o = vreinterpretq_f32_u16(vandq_u16(va0x0, vmask));
const float32x4_t va1x0o = vreinterpretq_f32_u16(vandq_u16(va1x0, vmask));
const float32x4_t va2x0o = vreinterpretq_f32_u16(vandq_u16(va2x0, vmask));
const float32x4_t va0x1o = vreinterpretq_f32_u16(vandq_u16(va0x1, vmask));
const float32x4_t va1x1o = vreinterpretq_f32_u16(vandq_u16(va1x1, vmask));
const float32x4_t va2x1o = vreinterpretq_f32_u16(vandq_u16(va2x1, vmask));
const float32x4_t va0x2o = vreinterpretq_f32_u16(vandq_u16(va0x2, vmask));
const float32x4_t va1x2o = vreinterpretq_f32_u16(vandq_u16(va1x2, vmask));
const float32x4_t va2x2o = vreinterpretq_f32_u16(vandq_u16(va2x2, vmask));
const float32x4_t va0x3o = vreinterpretq_f32_u16(vandq_u16(va0x3, vmask));
const float32x4_t va1x3o = vreinterpretq_f32_u16(vandq_u16(va1x3, vmask));
const float32x4_t va2x3o = vreinterpretq_f32_u16(vandq_u16(va2x3, vmask));
vacc0x0 = vfmaq_f32(vacc0x0, va0x0o, vb0o);
vacc1x0 = vfmaq_f32(vacc1x0, va1x0o, vb0o);
vacc2x0 = vfmaq_f32(vacc2x0, va2x0o, vb0o);
vacc0x1 = vfmaq_f32(vacc0x1, va0x1o, vb1o);
vacc1x1 = vfmaq_f32(vacc1x1, va1x1o, vb1o);
vacc2x1 = vfmaq_f32(vacc2x1, va2x1o, vb1o);
vacc0x2 = vfmaq_f32(vacc0x2, va0x2o, vb2o);
vacc1x2 = vfmaq_f32(vacc1x2, va1x2o, vb2o);
vacc2x2 = vfmaq_f32(vacc2x2, va2x2o, vb2o);
vacc0x3 = vfmaq_f32(vacc0x3, va0x3o, vb3o);
vacc1x3 = vfmaq_f32(vacc1x3, va1x3o, vb3o);
vacc2x3 = vfmaq_f32(vacc2x3, va2x3o, vb3o);
}
#if XNN_ARCH_ARM64
const float32x4_t vacc0x01 = vpaddq_f32(vacc0x0, vacc0x1);
const float32x4_t vacc1x01 = vpaddq_f32(vacc1x0, vacc1x1);
const float32x4_t vacc2x01 = vpaddq_f32(vacc2x0, vacc2x1);
const float32x4_t vacc0x23 = vpaddq_f32(vacc0x2, vacc0x3);
const float32x4_t vacc1x23 = vpaddq_f32(vacc1x2, vacc1x3);
const float32x4_t vacc2x23 = vpaddq_f32(vacc2x2, vacc2x3);
float32x4_t vacc0x0123 = vpaddq_f32(vacc0x01, vacc0x23);
float32x4_t vacc1x0123 = vpaddq_f32(vacc1x01, vacc1x23);
float32x4_t vacc2x0123 = vpaddq_f32(vacc2x01, vacc2x23);
#else
const float32x2_t vsum0x0 = vadd_f32(vget_low_f32(vacc0x0), vget_high_f32(vacc0x0));
const float32x2_t vsum1x0 = vadd_f32(vget_low_f32(vacc1x0), vget_high_f32(vacc1x0));
const float32x2_t vsum2x0 = vadd_f32(vget_low_f32(vacc2x0), vget_high_f32(vacc2x0));
const float32x2_t vsum0x1 = vadd_f32(vget_low_f32(vacc0x1), vget_high_f32(vacc0x1));
const float32x2_t vsum1x1 = vadd_f32(vget_low_f32(vacc1x1), vget_high_f32(vacc1x1));
const float32x2_t vsum2x1 = vadd_f32(vget_low_f32(vacc2x1), vget_high_f32(vacc2x1));
const float32x2_t vsum0x2 = vadd_f32(vget_low_f32(vacc0x2), vget_high_f32(vacc0x2));
const float32x2_t vsum1x2 = vadd_f32(vget_low_f32(vacc1x2), vget_high_f32(vacc1x2));
const float32x2_t vsum2x2 = vadd_f32(vget_low_f32(vacc2x2), vget_high_f32(vacc2x2));
const float32x2_t vsum0x3 = vadd_f32(vget_low_f32(vacc0x3), vget_high_f32(vacc0x3));
const float32x2_t vsum1x3 = vadd_f32(vget_low_f32(vacc1x3), vget_high_f32(vacc1x3));
const float32x2_t vsum2x3 = vadd_f32(vget_low_f32(vacc2x3), vget_high_f32(vacc2x3));
float32x4_t vacc0x0123 = vcombine_f32(vpadd_f32(vsum0x0, vsum0x1), vpadd_f32(vsum0x2, vsum0x3));
float32x4_t vacc1x0123 = vcombine_f32(vpadd_f32(vsum1x0, vsum1x1), vpadd_f32(vsum1x2, vsum1x3));
float32x4_t vacc2x0123 = vcombine_f32(vpadd_f32(vsum2x0, vsum2x1), vpadd_f32(vsum2x2, vsum2x3));
#endif
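    // Fused clamp: the scalar min/max from params are broadcast and applied to every
    // output lane.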
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
uint16x4_t vout0x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc0x0123), 16);
uint16x4_t vout1x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc1x0123), 16);
uint16x4_t vout2x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc2x0123), 16);
if XNN_LIKELY(nc >= 4) {
vst1_u16(c0, vout0x0123);
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
vst1_u16(c1, vout1x0123);
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
vst1_u16(c2, vout2x0123);
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
nc -= 4;
} else {
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_u16(vout0x0123), 0); c0 += 2;
vst1_lane_u32((void*) c1, vreinterpret_u32_u16(vout1x0123), 0); c1 += 2;
vst1_lane_u32((void*) c2, vreinterpret_u32_u16(vout2x0123), 0); c2 += 2;
vout0x0123 = vext_u16(vout0x0123, vout0x0123, 2);
vout1x0123 = vext_u16(vout1x0123, vout1x0123, 2);
vout2x0123 = vext_u16(vout2x0123, vout2x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c0, vout0x0123, 0);
vst1_lane_u16(c1, vout1x0123, 0);
vst1_lane_u16(c2, vout2x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 14,260 | 47.672355 | 109 | c |
| XNNPACK | XNNPACK-master/src/bf16-gemm/gen/bf16-gemm-3x4c8-minmax-neonfma-zip.c |
// Auto-generated file. Do not edit!
// Template: src/bf16-gemm/c8-neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_bf16_gemm_minmax_ukernel_3x4c8__neonfma_zip(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w_ptr,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w_ptr != NULL);
assert(c != NULL);
const uint16_t* a0 = (const uint16_t*) a;
uint16_t* c0 = (uint16_t*) c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const uint16_t* w = (const uint16_t*) w_ptr;
const uint16x8_t vzero = vmovq_n_u16(0);
do {
float32x4_t vacc0x0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc0x1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc0x2 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc0x3 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc1x0 = vacc0x0;
float32x4_t vacc1x1 = vacc0x1;
float32x4_t vacc1x2 = vacc0x2;
float32x4_t vacc1x3 = vacc0x3;
float32x4_t vacc2x0 = vacc0x0;
float32x4_t vacc2x1 = vacc0x1;
float32x4_t vacc2x2 = vacc0x2;
float32x4_t vacc2x3 = vacc0x3;
size_t k = kc;
for (; k >= 8 * sizeof(uint16_t); k -= 8 * sizeof(uint16_t)) {
const uint16x8_t va0h = vld1q_u16(a0); a0 += 8;
const uint16x8_t va1h = vld1q_u16(a1); a1 += 8;
const uint16x8_t va2h = vld1q_u16(a2); a2 += 8;
const uint16x8_t vb0h = vld1q_u16(w); w += 8;
const uint16x8_t vb1h = vld1q_u16(w); w += 8;
const uint16x8_t vb2h = vld1q_u16(w); w += 8;
const uint16x8_t vb3h = vld1q_u16(w); w += 8;
const uint16x8x2_t va0f = vzipq_u16(vzero, va0h);
const uint16x8x2_t va1f = vzipq_u16(vzero, va1h);
const uint16x8x2_t va2f = vzipq_u16(vzero, va2h);
const uint16x8x2_t vb0f = vzipq_u16(vzero, vb0h);
const uint16x8x2_t vb1f = vzipq_u16(vzero, vb1h);
const uint16x8x2_t vb2f = vzipq_u16(vzero, vb2h);
const uint16x8x2_t vb3f = vzipq_u16(vzero, vb3h);
vacc0x0 = vfmaq_f32(vacc0x0, vreinterpretq_f32_u16(va0f.val[0]), vreinterpretq_f32_u16(vb0f.val[0]));
vacc1x0 = vfmaq_f32(vacc1x0, vreinterpretq_f32_u16(va1f.val[0]), vreinterpretq_f32_u16(vb0f.val[0]));
vacc2x0 = vfmaq_f32(vacc2x0, vreinterpretq_f32_u16(va2f.val[0]), vreinterpretq_f32_u16(vb0f.val[0]));
vacc0x1 = vfmaq_f32(vacc0x1, vreinterpretq_f32_u16(va0f.val[0]), vreinterpretq_f32_u16(vb1f.val[0]));
vacc1x1 = vfmaq_f32(vacc1x1, vreinterpretq_f32_u16(va1f.val[0]), vreinterpretq_f32_u16(vb1f.val[0]));
vacc2x1 = vfmaq_f32(vacc2x1, vreinterpretq_f32_u16(va2f.val[0]), vreinterpretq_f32_u16(vb1f.val[0]));
vacc0x2 = vfmaq_f32(vacc0x2, vreinterpretq_f32_u16(va0f.val[0]), vreinterpretq_f32_u16(vb2f.val[0]));
vacc1x2 = vfmaq_f32(vacc1x2, vreinterpretq_f32_u16(va1f.val[0]), vreinterpretq_f32_u16(vb2f.val[0]));
vacc2x2 = vfmaq_f32(vacc2x2, vreinterpretq_f32_u16(va2f.val[0]), vreinterpretq_f32_u16(vb2f.val[0]));
vacc0x3 = vfmaq_f32(vacc0x3, vreinterpretq_f32_u16(va0f.val[0]), vreinterpretq_f32_u16(vb3f.val[0]));
vacc1x3 = vfmaq_f32(vacc1x3, vreinterpretq_f32_u16(va1f.val[0]), vreinterpretq_f32_u16(vb3f.val[0]));
vacc2x3 = vfmaq_f32(vacc2x3, vreinterpretq_f32_u16(va2f.val[0]), vreinterpretq_f32_u16(vb3f.val[0]));
vacc0x0 = vfmaq_f32(vacc0x0, vreinterpretq_f32_u16(va0f.val[1]), vreinterpretq_f32_u16(vb0f.val[1]));
vacc1x0 = vfmaq_f32(vacc1x0, vreinterpretq_f32_u16(va1f.val[1]), vreinterpretq_f32_u16(vb0f.val[1]));
vacc2x0 = vfmaq_f32(vacc2x0, vreinterpretq_f32_u16(va2f.val[1]), vreinterpretq_f32_u16(vb0f.val[1]));
vacc0x1 = vfmaq_f32(vacc0x1, vreinterpretq_f32_u16(va0f.val[1]), vreinterpretq_f32_u16(vb1f.val[1]));
vacc1x1 = vfmaq_f32(vacc1x1, vreinterpretq_f32_u16(va1f.val[1]), vreinterpretq_f32_u16(vb1f.val[1]));
vacc2x1 = vfmaq_f32(vacc2x1, vreinterpretq_f32_u16(va2f.val[1]), vreinterpretq_f32_u16(vb1f.val[1]));
vacc0x2 = vfmaq_f32(vacc0x2, vreinterpretq_f32_u16(va0f.val[1]), vreinterpretq_f32_u16(vb2f.val[1]));
vacc1x2 = vfmaq_f32(vacc1x2, vreinterpretq_f32_u16(va1f.val[1]), vreinterpretq_f32_u16(vb2f.val[1]));
vacc2x2 = vfmaq_f32(vacc2x2, vreinterpretq_f32_u16(va2f.val[1]), vreinterpretq_f32_u16(vb2f.val[1]));
vacc0x3 = vfmaq_f32(vacc0x3, vreinterpretq_f32_u16(va0f.val[1]), vreinterpretq_f32_u16(vb3f.val[1]));
vacc1x3 = vfmaq_f32(vacc1x3, vreinterpretq_f32_u16(va1f.val[1]), vreinterpretq_f32_u16(vb3f.val[1]));
vacc2x3 = vfmaq_f32(vacc2x3, vreinterpretq_f32_u16(va2f.val[1]), vreinterpretq_f32_u16(vb3f.val[1]));
}
if XNN_UNLIKELY(k != 0) {
const uint16x8_t va0h = vld1q_u16(a0); a0 = (const uint16_t*) ((uintptr_t) a0 + k);
const uint16x8_t va1h = vld1q_u16(a1); a1 = (const uint16_t*) ((uintptr_t) a1 + k);
const uint16x8_t va2h = vld1q_u16(a2); a2 = (const uint16_t*) ((uintptr_t) a2 + k);
const uint16x8_t vb0h = vld1q_u16(w); w += 8;
const uint16x8_t vb1h = vld1q_u16(w); w += 8;
const uint16x8_t vb2h = vld1q_u16(w); w += 8;
const uint16x8_t vb3h = vld1q_u16(w); w += 8;
const uint16x8_t vm0h = vceqq_u16(vb0h, vmovq_n_u16(0));
const uint16x8_t vm1h = vceqq_u16(vb1h, vmovq_n_u16(0));
const uint16x8_t vm2h = vceqq_u16(vb2h, vmovq_n_u16(0));
const uint16x8_t vm3h = vceqq_u16(vb3h, vmovq_n_u16(0));
const uint16x8x2_t vb0f = vzipq_u16(vzero, vb0h);
const uint16x8x2_t vb1f = vzipq_u16(vzero, vb1h);
const uint16x8x2_t vb2f = vzipq_u16(vzero, vb2h);
const uint16x8x2_t vb3f = vzipq_u16(vzero, vb3h);
const uint16x8_t va0x0h = vbicq_u16(va0h, vm0h);
const uint16x8_t va1x0h = vbicq_u16(va1h, vm0h);
const uint16x8_t va2x0h = vbicq_u16(va2h, vm0h);
const uint16x8_t va0x1h = vbicq_u16(va0h, vm1h);
const uint16x8_t va1x1h = vbicq_u16(va1h, vm1h);
const uint16x8_t va2x1h = vbicq_u16(va2h, vm1h);
const uint16x8_t va0x2h = vbicq_u16(va0h, vm2h);
const uint16x8_t va1x2h = vbicq_u16(va1h, vm2h);
const uint16x8_t va2x2h = vbicq_u16(va2h, vm2h);
const uint16x8_t va0x3h = vbicq_u16(va0h, vm3h);
const uint16x8_t va1x3h = vbicq_u16(va1h, vm3h);
const uint16x8_t va2x3h = vbicq_u16(va2h, vm3h);
const uint16x8x2_t va0x0f = vzipq_u16(vzero, va0x0h);
const uint16x8x2_t va1x0f = vzipq_u16(vzero, va1x0h);
const uint16x8x2_t va2x0f = vzipq_u16(vzero, va2x0h);
const uint16x8x2_t va0x1f = vzipq_u16(vzero, va0x1h);
const uint16x8x2_t va1x1f = vzipq_u16(vzero, va1x1h);
const uint16x8x2_t va2x1f = vzipq_u16(vzero, va2x1h);
const uint16x8x2_t va0x2f = vzipq_u16(vzero, va0x2h);
const uint16x8x2_t va1x2f = vzipq_u16(vzero, va1x2h);
const uint16x8x2_t va2x2f = vzipq_u16(vzero, va2x2h);
const uint16x8x2_t va0x3f = vzipq_u16(vzero, va0x3h);
const uint16x8x2_t va1x3f = vzipq_u16(vzero, va1x3h);
const uint16x8x2_t va2x3f = vzipq_u16(vzero, va2x3h);
vacc0x0 = vfmaq_f32(vacc0x0, vreinterpretq_f32_u16(va0x0f.val[0]), vreinterpretq_f32_u16(vb0f.val[0]));
vacc1x0 = vfmaq_f32(vacc1x0, vreinterpretq_f32_u16(va1x0f.val[0]), vreinterpretq_f32_u16(vb0f.val[0]));
vacc2x0 = vfmaq_f32(vacc2x0, vreinterpretq_f32_u16(va2x0f.val[0]), vreinterpretq_f32_u16(vb0f.val[0]));
vacc0x1 = vfmaq_f32(vacc0x1, vreinterpretq_f32_u16(va0x1f.val[0]), vreinterpretq_f32_u16(vb1f.val[0]));
vacc1x1 = vfmaq_f32(vacc1x1, vreinterpretq_f32_u16(va1x1f.val[0]), vreinterpretq_f32_u16(vb1f.val[0]));
vacc2x1 = vfmaq_f32(vacc2x1, vreinterpretq_f32_u16(va2x1f.val[0]), vreinterpretq_f32_u16(vb1f.val[0]));
vacc0x2 = vfmaq_f32(vacc0x2, vreinterpretq_f32_u16(va0x2f.val[0]), vreinterpretq_f32_u16(vb2f.val[0]));
vacc1x2 = vfmaq_f32(vacc1x2, vreinterpretq_f32_u16(va1x2f.val[0]), vreinterpretq_f32_u16(vb2f.val[0]));
vacc2x2 = vfmaq_f32(vacc2x2, vreinterpretq_f32_u16(va2x2f.val[0]), vreinterpretq_f32_u16(vb2f.val[0]));
vacc0x3 = vfmaq_f32(vacc0x3, vreinterpretq_f32_u16(va0x3f.val[0]), vreinterpretq_f32_u16(vb3f.val[0]));
vacc1x3 = vfmaq_f32(vacc1x3, vreinterpretq_f32_u16(va1x3f.val[0]), vreinterpretq_f32_u16(vb3f.val[0]));
vacc2x3 = vfmaq_f32(vacc2x3, vreinterpretq_f32_u16(va2x3f.val[0]), vreinterpretq_f32_u16(vb3f.val[0]));
vacc0x0 = vfmaq_f32(vacc0x0, vreinterpretq_f32_u16(va0x0f.val[1]), vreinterpretq_f32_u16(vb0f.val[1]));
vacc1x0 = vfmaq_f32(vacc1x0, vreinterpretq_f32_u16(va1x0f.val[1]), vreinterpretq_f32_u16(vb0f.val[1]));
vacc2x0 = vfmaq_f32(vacc2x0, vreinterpretq_f32_u16(va2x0f.val[1]), vreinterpretq_f32_u16(vb0f.val[1]));
vacc0x1 = vfmaq_f32(vacc0x1, vreinterpretq_f32_u16(va0x1f.val[1]), vreinterpretq_f32_u16(vb1f.val[1]));
vacc1x1 = vfmaq_f32(vacc1x1, vreinterpretq_f32_u16(va1x1f.val[1]), vreinterpretq_f32_u16(vb1f.val[1]));
vacc2x1 = vfmaq_f32(vacc2x1, vreinterpretq_f32_u16(va2x1f.val[1]), vreinterpretq_f32_u16(vb1f.val[1]));
vacc0x2 = vfmaq_f32(vacc0x2, vreinterpretq_f32_u16(va0x2f.val[1]), vreinterpretq_f32_u16(vb2f.val[1]));
vacc1x2 = vfmaq_f32(vacc1x2, vreinterpretq_f32_u16(va1x2f.val[1]), vreinterpretq_f32_u16(vb2f.val[1]));
vacc2x2 = vfmaq_f32(vacc2x2, vreinterpretq_f32_u16(va2x2f.val[1]), vreinterpretq_f32_u16(vb2f.val[1]));
vacc0x3 = vfmaq_f32(vacc0x3, vreinterpretq_f32_u16(va0x3f.val[1]), vreinterpretq_f32_u16(vb3f.val[1]));
vacc1x3 = vfmaq_f32(vacc1x3, vreinterpretq_f32_u16(va1x3f.val[1]), vreinterpretq_f32_u16(vb3f.val[1]));
vacc2x3 = vfmaq_f32(vacc2x3, vreinterpretq_f32_u16(va2x3f.val[1]), vreinterpretq_f32_u16(vb3f.val[1]));
}
#if XNN_ARCH_ARM64
const float32x4_t vacc0x01 = vpaddq_f32(vacc0x0, vacc0x1);
const float32x4_t vacc1x01 = vpaddq_f32(vacc1x0, vacc1x1);
const float32x4_t vacc2x01 = vpaddq_f32(vacc2x0, vacc2x1);
const float32x4_t vacc0x23 = vpaddq_f32(vacc0x2, vacc0x3);
const float32x4_t vacc1x23 = vpaddq_f32(vacc1x2, vacc1x3);
const float32x4_t vacc2x23 = vpaddq_f32(vacc2x2, vacc2x3);
float32x4_t vacc0x0123 = vpaddq_f32(vacc0x01, vacc0x23);
float32x4_t vacc1x0123 = vpaddq_f32(vacc1x01, vacc1x23);
float32x4_t vacc2x0123 = vpaddq_f32(vacc2x01, vacc2x23);
#else
const float32x2_t vsum0x0 = vadd_f32(vget_low_f32(vacc0x0), vget_high_f32(vacc0x0));
const float32x2_t vsum1x0 = vadd_f32(vget_low_f32(vacc1x0), vget_high_f32(vacc1x0));
const float32x2_t vsum2x0 = vadd_f32(vget_low_f32(vacc2x0), vget_high_f32(vacc2x0));
const float32x2_t vsum0x1 = vadd_f32(vget_low_f32(vacc0x1), vget_high_f32(vacc0x1));
const float32x2_t vsum1x1 = vadd_f32(vget_low_f32(vacc1x1), vget_high_f32(vacc1x1));
const float32x2_t vsum2x1 = vadd_f32(vget_low_f32(vacc2x1), vget_high_f32(vacc2x1));
const float32x2_t vsum0x2 = vadd_f32(vget_low_f32(vacc0x2), vget_high_f32(vacc0x2));
const float32x2_t vsum1x2 = vadd_f32(vget_low_f32(vacc1x2), vget_high_f32(vacc1x2));
const float32x2_t vsum2x2 = vadd_f32(vget_low_f32(vacc2x2), vget_high_f32(vacc2x2));
const float32x2_t vsum0x3 = vadd_f32(vget_low_f32(vacc0x3), vget_high_f32(vacc0x3));
const float32x2_t vsum1x3 = vadd_f32(vget_low_f32(vacc1x3), vget_high_f32(vacc1x3));
const float32x2_t vsum2x3 = vadd_f32(vget_low_f32(vacc2x3), vget_high_f32(vacc2x3));
float32x4_t vacc0x0123 = vcombine_f32(vpadd_f32(vsum0x0, vsum0x1), vpadd_f32(vsum0x2, vsum0x3));
float32x4_t vacc1x0123 = vcombine_f32(vpadd_f32(vsum1x0, vsum1x1), vpadd_f32(vsum1x2, vsum1x3));
float32x4_t vacc2x0123 = vcombine_f32(vpadd_f32(vsum2x0, vsum2x1), vpadd_f32(vsum2x2, vsum2x3));
#endif
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
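    // Note: vshrn by 16 keeps the upper 16 bits of each f32 lane, i.e. it truncates
    // the clamped results to their bf16 bit pattern without rounding.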
uint16x4_t vout0x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc0x0123), 16);
uint16x4_t vout1x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc1x0123), 16);
uint16x4_t vout2x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc2x0123), 16);
if XNN_LIKELY(nc >= 4) {
vst1_u16(c0, vout0x0123);
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
vst1_u16(c1, vout1x0123);
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
vst1_u16(c2, vout2x0123);
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
nc -= 4;
} else {
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_u16(vout0x0123), 0); c0 += 2;
vst1_lane_u32((void*) c1, vreinterpret_u32_u16(vout1x0123), 0); c1 += 2;
vst1_lane_u32((void*) c2, vreinterpret_u32_u16(vout2x0123), 0); c2 += 2;
vout0x0123 = vext_u16(vout0x0123, vout0x0123, 2);
vout1x0123 = vext_u16(vout1x0123, vout1x0123, 2);
vout2x0123 = vext_u16(vout2x0123, vout2x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c0, vout0x0123, 0);
vst1_lane_u16(c1, vout1x0123, 0);
vst1_lane_u16(c2, vout2x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 14,338 | 52.906015 | 109 | c |
| XNNPACK | XNNPACK-master/src/bf16-gemm/gen/bf16-gemm-4x4c8-minmax-neonbf16-bfdot.c |
// Auto-generated file. Do not edit!
// Template: src/bf16-gemm/c8-neonbf16.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_bf16_gemm_minmax_ukernel_4x4c8__neonbf16_bfdot(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w_ptr,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(bfloat16_t) == 0);
assert(a != NULL);
assert(w_ptr != NULL);
assert(c != NULL);
const bfloat16_t* a0 = (const bfloat16_t*) a;
bfloat16_t* c0 = (bfloat16_t*) c;
const bfloat16_t* a1 = (const bfloat16_t*) ((uintptr_t) a0 + a_stride);
bfloat16_t* c1 = (bfloat16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const bfloat16_t* a2 = (const bfloat16_t*) ((uintptr_t) a1 + a_stride);
bfloat16_t* c2 = (bfloat16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const bfloat16_t* a3 = (const bfloat16_t*) ((uintptr_t) a2 + a_stride);
bfloat16_t* c3 = (bfloat16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
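  // When mr < 4, the surplus row pointers alias the last valid row, so those rows
  // recompute and rewrite identical data instead of branching per row.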
const bfloat16_t* w = (const bfloat16_t*) w_ptr;
do {
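    // Seed each of the 4 column accumulators with one bf16 value from the packed
    // weights (the per-column bias) in lane 0; the horizontal reduction at the end
    // folds it into that column's total.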
float32x4_t vacc0x0 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc0x1 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc0x2 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc0x3 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc1x0 = vacc0x0;
float32x4_t vacc1x1 = vacc0x1;
float32x4_t vacc1x2 = vacc0x2;
float32x4_t vacc1x3 = vacc0x3;
float32x4_t vacc2x0 = vacc0x0;
float32x4_t vacc2x1 = vacc0x1;
float32x4_t vacc2x2 = vacc0x2;
float32x4_t vacc2x3 = vacc0x3;
float32x4_t vacc3x0 = vacc0x0;
float32x4_t vacc3x1 = vacc0x1;
float32x4_t vacc3x2 = vacc0x2;
float32x4_t vacc3x3 = vacc0x3;
size_t k = kc;
for (; k >= 8 * sizeof(bfloat16_t); k -= 8 * sizeof(bfloat16_t)) {
const bfloat16x8_t va0 = vld1q_bf16(a0); a0 += 8;
const bfloat16x8_t va1 = vld1q_bf16(a1); a1 += 8;
const bfloat16x8_t va2 = vld1q_bf16(a2); a2 += 8;
const bfloat16x8_t va3 = vld1q_bf16(a3); a3 += 8;
const bfloat16x8_t vb0 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb1 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb2 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb3 = vld1q_bf16(w); w += 8;
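      // Each vbfdotq_f32 multiplies adjacent bf16 pairs and accumulates them into the
      // four f32 lanes, so one call consumes all 8 k-elements of a row/column pair.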
vacc0x0 = vbfdotq_f32(vacc0x0, va0, vb0);
vacc1x0 = vbfdotq_f32(vacc1x0, va1, vb0);
vacc2x0 = vbfdotq_f32(vacc2x0, va2, vb0);
vacc3x0 = vbfdotq_f32(vacc3x0, va3, vb0);
vacc0x1 = vbfdotq_f32(vacc0x1, va0, vb1);
vacc1x1 = vbfdotq_f32(vacc1x1, va1, vb1);
vacc2x1 = vbfdotq_f32(vacc2x1, va2, vb1);
vacc3x1 = vbfdotq_f32(vacc3x1, va3, vb1);
vacc0x2 = vbfdotq_f32(vacc0x2, va0, vb2);
vacc1x2 = vbfdotq_f32(vacc1x2, va1, vb2);
vacc2x2 = vbfdotq_f32(vacc2x2, va2, vb2);
vacc3x2 = vbfdotq_f32(vacc3x2, va3, vb2);
vacc0x3 = vbfdotq_f32(vacc0x3, va0, vb3);
vacc1x3 = vbfdotq_f32(vacc1x3, va1, vb3);
vacc2x3 = vbfdotq_f32(vacc2x3, va2, vb3);
vacc3x3 = vbfdotq_f32(vacc3x3, va3, vb3);
}
if XNN_UNLIKELY(k != 0) {
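      // Remainder of fewer than 8 bf16 elements: this relies on the packed weights being
      // zero-padded past kc, so the a lanes are cleared (vbic) wherever the matching b
      // lanes are zero before accumulating.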
const bfloat16x8_t va0 = vld1q_bf16(a0); a0 = (const bfloat16_t*) ((uintptr_t) a0 + k);
const bfloat16x8_t va1 = vld1q_bf16(a1); a1 = (const bfloat16_t*) ((uintptr_t) a1 + k);
const bfloat16x8_t va2 = vld1q_bf16(a2); a2 = (const bfloat16_t*) ((uintptr_t) a2 + k);
const bfloat16x8_t va3 = vld1q_bf16(a3); a3 = (const bfloat16_t*) ((uintptr_t) a3 + k);
const bfloat16x8_t vb0 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb1 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb2 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb3 = vld1q_bf16(w); w += 8;
const uint16x8_t vm0 = vceqq_u16(vreinterpretq_u16_bf16(vb0), vmovq_n_u16(0));
const uint16x8_t vm1 = vceqq_u16(vreinterpretq_u16_bf16(vb1), vmovq_n_u16(0));
const uint16x8_t vm2 = vceqq_u16(vreinterpretq_u16_bf16(vb2), vmovq_n_u16(0));
const uint16x8_t vm3 = vceqq_u16(vreinterpretq_u16_bf16(vb3), vmovq_n_u16(0));
const bfloat16x8_t va0x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm0));
vacc0x0 = vbfdotq_f32(vacc0x0, va0x0, vb0);
const bfloat16x8_t va1x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm0));
vacc1x0 = vbfdotq_f32(vacc1x0, va1x0, vb0);
const bfloat16x8_t va2x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm0));
vacc2x0 = vbfdotq_f32(vacc2x0, va2x0, vb0);
const bfloat16x8_t va3x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va3), vm0));
vacc3x0 = vbfdotq_f32(vacc3x0, va3x0, vb0);
const bfloat16x8_t va0x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm1));
vacc0x1 = vbfdotq_f32(vacc0x1, va0x1, vb1);
const bfloat16x8_t va1x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm1));
vacc1x1 = vbfdotq_f32(vacc1x1, va1x1, vb1);
const bfloat16x8_t va2x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm1));
vacc2x1 = vbfdotq_f32(vacc2x1, va2x1, vb1);
const bfloat16x8_t va3x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va3), vm1));
vacc3x1 = vbfdotq_f32(vacc3x1, va3x1, vb1);
const bfloat16x8_t va0x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm2));
vacc0x2 = vbfdotq_f32(vacc0x2, va0x2, vb2);
const bfloat16x8_t va1x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm2));
vacc1x2 = vbfdotq_f32(vacc1x2, va1x2, vb2);
const bfloat16x8_t va2x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm2));
vacc2x2 = vbfdotq_f32(vacc2x2, va2x2, vb2);
const bfloat16x8_t va3x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va3), vm2));
vacc3x2 = vbfdotq_f32(vacc3x2, va3x2, vb2);
const bfloat16x8_t va0x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm3));
vacc0x3 = vbfdotq_f32(vacc0x3, va0x3, vb3);
const bfloat16x8_t va1x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm3));
vacc1x3 = vbfdotq_f32(vacc1x3, va1x3, vb3);
const bfloat16x8_t va2x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm3));
vacc2x3 = vbfdotq_f32(vacc2x3, va2x3, vb3);
const bfloat16x8_t va3x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va3), vm3));
vacc3x3 = vbfdotq_f32(vacc3x3, va3x3, vb3);
}
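    // Horizontally reduce each per-column accumulator to a single f32: two rounds of
    // pairwise adds on AArch64, half-vector add plus vpadd on AArch32, leaving one
    // vector of 4 output columns per row.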
#if XNN_ARCH_ARM64
const float32x4_t vacc0x01 = vpaddq_f32(vacc0x0, vacc0x1);
const float32x4_t vacc1x01 = vpaddq_f32(vacc1x0, vacc1x1);
const float32x4_t vacc2x01 = vpaddq_f32(vacc2x0, vacc2x1);
const float32x4_t vacc3x01 = vpaddq_f32(vacc3x0, vacc3x1);
const float32x4_t vacc0x23 = vpaddq_f32(vacc0x2, vacc0x3);
const float32x4_t vacc1x23 = vpaddq_f32(vacc1x2, vacc1x3);
const float32x4_t vacc2x23 = vpaddq_f32(vacc2x2, vacc2x3);
const float32x4_t vacc3x23 = vpaddq_f32(vacc3x2, vacc3x3);
float32x4_t vacc0x0123 = vpaddq_f32(vacc0x01, vacc0x23);
float32x4_t vacc1x0123 = vpaddq_f32(vacc1x01, vacc1x23);
float32x4_t vacc2x0123 = vpaddq_f32(vacc2x01, vacc2x23);
float32x4_t vacc3x0123 = vpaddq_f32(vacc3x01, vacc3x23);
#else
const float32x2_t vsum0x0 = vadd_f32(vget_low_f32(vacc0x0), vget_high_f32(vacc0x0));
const float32x2_t vsum1x0 = vadd_f32(vget_low_f32(vacc1x0), vget_high_f32(vacc1x0));
const float32x2_t vsum2x0 = vadd_f32(vget_low_f32(vacc2x0), vget_high_f32(vacc2x0));
const float32x2_t vsum3x0 = vadd_f32(vget_low_f32(vacc3x0), vget_high_f32(vacc3x0));
const float32x2_t vsum0x1 = vadd_f32(vget_low_f32(vacc0x1), vget_high_f32(vacc0x1));
const float32x2_t vsum1x1 = vadd_f32(vget_low_f32(vacc1x1), vget_high_f32(vacc1x1));
const float32x2_t vsum2x1 = vadd_f32(vget_low_f32(vacc2x1), vget_high_f32(vacc2x1));
const float32x2_t vsum3x1 = vadd_f32(vget_low_f32(vacc3x1), vget_high_f32(vacc3x1));
const float32x2_t vsum0x2 = vadd_f32(vget_low_f32(vacc0x2), vget_high_f32(vacc0x2));
const float32x2_t vsum1x2 = vadd_f32(vget_low_f32(vacc1x2), vget_high_f32(vacc1x2));
const float32x2_t vsum2x2 = vadd_f32(vget_low_f32(vacc2x2), vget_high_f32(vacc2x2));
const float32x2_t vsum3x2 = vadd_f32(vget_low_f32(vacc3x2), vget_high_f32(vacc3x2));
const float32x2_t vsum0x3 = vadd_f32(vget_low_f32(vacc0x3), vget_high_f32(vacc0x3));
const float32x2_t vsum1x3 = vadd_f32(vget_low_f32(vacc1x3), vget_high_f32(vacc1x3));
const float32x2_t vsum2x3 = vadd_f32(vget_low_f32(vacc2x3), vget_high_f32(vacc2x3));
const float32x2_t vsum3x3 = vadd_f32(vget_low_f32(vacc3x3), vget_high_f32(vacc3x3));
float32x4_t vacc0x0123 = vcombine_f32(vpadd_f32(vsum0x0, vsum0x1), vpadd_f32(vsum0x2, vsum0x3));
float32x4_t vacc1x0123 = vcombine_f32(vpadd_f32(vsum1x0, vsum1x1), vpadd_f32(vsum1x2, vsum1x3));
float32x4_t vacc2x0123 = vcombine_f32(vpadd_f32(vsum2x0, vsum2x1), vpadd_f32(vsum2x2, vsum2x3));
float32x4_t vacc3x0123 = vcombine_f32(vpadd_f32(vsum3x0, vsum3x1), vpadd_f32(vsum3x2, vsum3x3));
#endif
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
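    // Narrow the clamped f32 results back to bf16 with the native conversion.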
bfloat16x4_t vout0x0123 = vcvt_bf16_f32(vacc0x0123);
bfloat16x4_t vout1x0123 = vcvt_bf16_f32(vacc1x0123);
bfloat16x4_t vout2x0123 = vcvt_bf16_f32(vacc2x0123);
bfloat16x4_t vout3x0123 = vcvt_bf16_f32(vacc3x0123);
if XNN_LIKELY(nc >= 4) {
vst1_bf16(c0, vout0x0123);
c0 = (bfloat16_t*) ((uintptr_t) c0 + cn_stride);
vst1_bf16(c1, vout1x0123);
c1 = (bfloat16_t*) ((uintptr_t) c1 + cn_stride);
vst1_bf16(c2, vout2x0123);
c2 = (bfloat16_t*) ((uintptr_t) c2 + cn_stride);
vst1_bf16(c3, vout3x0123);
c3 = (bfloat16_t*) ((uintptr_t) c3 + cn_stride);
a0 = (const bfloat16_t*) ((uintptr_t) a0 - kc);
a1 = (const bfloat16_t*) ((uintptr_t) a1 - kc);
a2 = (const bfloat16_t*) ((uintptr_t) a2 - kc);
a3 = (const bfloat16_t*) ((uintptr_t) a3 - kc);
nc -= 4;
} else {
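      // Partial tile: write the remaining 1-3 columns per row (a 32-bit store covers
      // 2 columns, then a 16-bit store handles an odd trailing column).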
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_bf16(vout0x0123), 0); c0 += 2;
vst1_lane_u32((void*) c1, vreinterpret_u32_bf16(vout1x0123), 0); c1 += 2;
vst1_lane_u32((void*) c2, vreinterpret_u32_bf16(vout2x0123), 0); c2 += 2;
vst1_lane_u32((void*) c3, vreinterpret_u32_bf16(vout3x0123), 0); c3 += 2;
vout0x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout0x0123), vreinterpret_u16_bf16(vout0x0123), 2));
vout1x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout1x0123), vreinterpret_u16_bf16(vout1x0123), 2));
vout2x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout2x0123), vreinterpret_u16_bf16(vout2x0123), 2));
vout3x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout3x0123), vreinterpret_u16_bf16(vout3x0123), 2));
}
if (nc & 1) {
vst1_lane_bf16(c0, vout0x0123, 0);
vst1_lane_bf16(c1, vout1x0123, 0);
vst1_lane_bf16(c2, vout2x0123, 0);
vst1_lane_bf16(c3, vout3x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 12,252 | 47.623016 | 126 | c |
| XNNPACK | XNNPACK-master/src/bf16-gemm/gen/bf16-gemm-4x4c8-minmax-neonbf16-bfmlal.c |
// Auto-generated file. Do not edit!
// Template: src/bf16-gemm/c8-neonbf16.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_bf16_gemm_minmax_ukernel_4x4c8__neonbf16_bfmlal(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w_ptr,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(bfloat16_t) == 0);
assert(a != NULL);
assert(w_ptr != NULL);
assert(c != NULL);
const bfloat16_t* a0 = (const bfloat16_t*) a;
bfloat16_t* c0 = (bfloat16_t*) c;
const bfloat16_t* a1 = (const bfloat16_t*) ((uintptr_t) a0 + a_stride);
bfloat16_t* c1 = (bfloat16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const bfloat16_t* a2 = (const bfloat16_t*) ((uintptr_t) a1 + a_stride);
bfloat16_t* c2 = (bfloat16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const bfloat16_t* a3 = (const bfloat16_t*) ((uintptr_t) a2 + a_stride);
bfloat16_t* c3 = (bfloat16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const bfloat16_t* w = (const bfloat16_t*) w_ptr;
do {
float32x4_t vacc0x0 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc0x1 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc0x2 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc0x3 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
float32x4_t vacc1x0 = vacc0x0;
float32x4_t vacc1x1 = vacc0x1;
float32x4_t vacc1x2 = vacc0x2;
float32x4_t vacc1x3 = vacc0x3;
float32x4_t vacc2x0 = vacc0x0;
float32x4_t vacc2x1 = vacc0x1;
float32x4_t vacc2x2 = vacc0x2;
float32x4_t vacc2x3 = vacc0x3;
float32x4_t vacc3x0 = vacc0x0;
float32x4_t vacc3x1 = vacc0x1;
float32x4_t vacc3x2 = vacc0x2;
float32x4_t vacc3x3 = vacc0x3;
size_t k = kc;
for (; k >= 8 * sizeof(bfloat16_t); k -= 8 * sizeof(bfloat16_t)) {
const bfloat16x8_t va0 = vld1q_bf16(a0); a0 += 8;
const bfloat16x8_t va1 = vld1q_bf16(a1); a1 += 8;
const bfloat16x8_t va2 = vld1q_bf16(a2); a2 += 8;
const bfloat16x8_t va3 = vld1q_bf16(a3); a3 += 8;
const bfloat16x8_t vb0 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb1 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb2 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb3 = vld1q_bf16(w); w += 8;
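      // vbfmlalbq_f32/vbfmlaltq_f32 widen and multiply-accumulate the even ("bottom")
      // and odd ("top") bf16 lanes respectively; together the pair covers all 8
      // k-elements, matching what a single bfdot would do.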
vacc0x0 = vbfmlalbq_f32(vacc0x0, va0, vb0);
vacc1x0 = vbfmlalbq_f32(vacc1x0, va1, vb0);
vacc2x0 = vbfmlalbq_f32(vacc2x0, va2, vb0);
vacc3x0 = vbfmlalbq_f32(vacc3x0, va3, vb0);
vacc0x1 = vbfmlalbq_f32(vacc0x1, va0, vb1);
vacc1x1 = vbfmlalbq_f32(vacc1x1, va1, vb1);
vacc2x1 = vbfmlalbq_f32(vacc2x1, va2, vb1);
vacc3x1 = vbfmlalbq_f32(vacc3x1, va3, vb1);
vacc0x2 = vbfmlalbq_f32(vacc0x2, va0, vb2);
vacc1x2 = vbfmlalbq_f32(vacc1x2, va1, vb2);
vacc2x2 = vbfmlalbq_f32(vacc2x2, va2, vb2);
vacc3x2 = vbfmlalbq_f32(vacc3x2, va3, vb2);
vacc0x3 = vbfmlalbq_f32(vacc0x3, va0, vb3);
vacc1x3 = vbfmlalbq_f32(vacc1x3, va1, vb3);
vacc2x3 = vbfmlalbq_f32(vacc2x3, va2, vb3);
vacc3x3 = vbfmlalbq_f32(vacc3x3, va3, vb3);
vacc0x0 = vbfmlaltq_f32(vacc0x0, va0, vb0);
vacc1x0 = vbfmlaltq_f32(vacc1x0, va1, vb0);
vacc2x0 = vbfmlaltq_f32(vacc2x0, va2, vb0);
vacc3x0 = vbfmlaltq_f32(vacc3x0, va3, vb0);
vacc0x1 = vbfmlaltq_f32(vacc0x1, va0, vb1);
vacc1x1 = vbfmlaltq_f32(vacc1x1, va1, vb1);
vacc2x1 = vbfmlaltq_f32(vacc2x1, va2, vb1);
vacc3x1 = vbfmlaltq_f32(vacc3x1, va3, vb1);
vacc0x2 = vbfmlaltq_f32(vacc0x2, va0, vb2);
vacc1x2 = vbfmlaltq_f32(vacc1x2, va1, vb2);
vacc2x2 = vbfmlaltq_f32(vacc2x2, va2, vb2);
vacc3x2 = vbfmlaltq_f32(vacc3x2, va3, vb2);
vacc0x3 = vbfmlaltq_f32(vacc0x3, va0, vb3);
vacc1x3 = vbfmlaltq_f32(vacc1x3, va1, vb3);
vacc2x3 = vbfmlaltq_f32(vacc2x3, va2, vb3);
vacc3x3 = vbfmlaltq_f32(vacc3x3, va3, vb3);
}
if XNN_UNLIKELY(k != 0) {
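      // Remainder handling mirrors the bfdot variant: a lanes are cleared wherever the
      // zero-padded b lanes are zero before accumulating.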
const bfloat16x8_t va0 = vld1q_bf16(a0); a0 = (const bfloat16_t*) ((uintptr_t) a0 + k);
const bfloat16x8_t va1 = vld1q_bf16(a1); a1 = (const bfloat16_t*) ((uintptr_t) a1 + k);
const bfloat16x8_t va2 = vld1q_bf16(a2); a2 = (const bfloat16_t*) ((uintptr_t) a2 + k);
const bfloat16x8_t va3 = vld1q_bf16(a3); a3 = (const bfloat16_t*) ((uintptr_t) a3 + k);
const bfloat16x8_t vb0 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb1 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb2 = vld1q_bf16(w); w += 8;
const bfloat16x8_t vb3 = vld1q_bf16(w); w += 8;
const uint16x8_t vm0 = vceqq_u16(vreinterpretq_u16_bf16(vb0), vmovq_n_u16(0));
const uint16x8_t vm1 = vceqq_u16(vreinterpretq_u16_bf16(vb1), vmovq_n_u16(0));
const uint16x8_t vm2 = vceqq_u16(vreinterpretq_u16_bf16(vb2), vmovq_n_u16(0));
const uint16x8_t vm3 = vceqq_u16(vreinterpretq_u16_bf16(vb3), vmovq_n_u16(0));
const bfloat16x8_t va0x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm0));
vacc0x0 = vbfmlalbq_f32(vacc0x0, va0x0, vb0);
vacc0x0 = vbfmlaltq_f32(vacc0x0, va0x0, vb0);
const bfloat16x8_t va1x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm0));
vacc1x0 = vbfmlalbq_f32(vacc1x0, va1x0, vb0);
vacc1x0 = vbfmlaltq_f32(vacc1x0, va1x0, vb0);
const bfloat16x8_t va2x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm0));
vacc2x0 = vbfmlalbq_f32(vacc2x0, va2x0, vb0);
vacc2x0 = vbfmlaltq_f32(vacc2x0, va2x0, vb0);
const bfloat16x8_t va3x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va3), vm0));
vacc3x0 = vbfmlalbq_f32(vacc3x0, va3x0, vb0);
vacc3x0 = vbfmlaltq_f32(vacc3x0, va3x0, vb0);
const bfloat16x8_t va0x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm1));
vacc0x1 = vbfmlalbq_f32(vacc0x1, va0x1, vb1);
vacc0x1 = vbfmlaltq_f32(vacc0x1, va0x1, vb1);
const bfloat16x8_t va1x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm1));
vacc1x1 = vbfmlalbq_f32(vacc1x1, va1x1, vb1);
vacc1x1 = vbfmlaltq_f32(vacc1x1, va1x1, vb1);
const bfloat16x8_t va2x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm1));
vacc2x1 = vbfmlalbq_f32(vacc2x1, va2x1, vb1);
vacc2x1 = vbfmlaltq_f32(vacc2x1, va2x1, vb1);
const bfloat16x8_t va3x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va3), vm1));
vacc3x1 = vbfmlalbq_f32(vacc3x1, va3x1, vb1);
vacc3x1 = vbfmlaltq_f32(vacc3x1, va3x1, vb1);
const bfloat16x8_t va0x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm2));
vacc0x2 = vbfmlalbq_f32(vacc0x2, va0x2, vb2);
vacc0x2 = vbfmlaltq_f32(vacc0x2, va0x2, vb2);
const bfloat16x8_t va1x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm2));
vacc1x2 = vbfmlalbq_f32(vacc1x2, va1x2, vb2);
vacc1x2 = vbfmlaltq_f32(vacc1x2, va1x2, vb2);
const bfloat16x8_t va2x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm2));
vacc2x2 = vbfmlalbq_f32(vacc2x2, va2x2, vb2);
vacc2x2 = vbfmlaltq_f32(vacc2x2, va2x2, vb2);
const bfloat16x8_t va3x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va3), vm2));
vacc3x2 = vbfmlalbq_f32(vacc3x2, va3x2, vb2);
vacc3x2 = vbfmlaltq_f32(vacc3x2, va3x2, vb2);
const bfloat16x8_t va0x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm3));
vacc0x3 = vbfmlalbq_f32(vacc0x3, va0x3, vb3);
vacc0x3 = vbfmlaltq_f32(vacc0x3, va0x3, vb3);
const bfloat16x8_t va1x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm3));
vacc1x3 = vbfmlalbq_f32(vacc1x3, va1x3, vb3);
vacc1x3 = vbfmlaltq_f32(vacc1x3, va1x3, vb3);
const bfloat16x8_t va2x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm3));
vacc2x3 = vbfmlalbq_f32(vacc2x3, va2x3, vb3);
vacc2x3 = vbfmlaltq_f32(vacc2x3, va2x3, vb3);
const bfloat16x8_t va3x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va3), vm3));
vacc3x3 = vbfmlalbq_f32(vacc3x3, va3x3, vb3);
vacc3x3 = vbfmlaltq_f32(vacc3x3, va3x3, vb3);
}
#if XNN_ARCH_ARM64
const float32x4_t vacc0x01 = vpaddq_f32(vacc0x0, vacc0x1);
const float32x4_t vacc1x01 = vpaddq_f32(vacc1x0, vacc1x1);
const float32x4_t vacc2x01 = vpaddq_f32(vacc2x0, vacc2x1);
const float32x4_t vacc3x01 = vpaddq_f32(vacc3x0, vacc3x1);
const float32x4_t vacc0x23 = vpaddq_f32(vacc0x2, vacc0x3);
const float32x4_t vacc1x23 = vpaddq_f32(vacc1x2, vacc1x3);
const float32x4_t vacc2x23 = vpaddq_f32(vacc2x2, vacc2x3);
const float32x4_t vacc3x23 = vpaddq_f32(vacc3x2, vacc3x3);
float32x4_t vacc0x0123 = vpaddq_f32(vacc0x01, vacc0x23);
float32x4_t vacc1x0123 = vpaddq_f32(vacc1x01, vacc1x23);
float32x4_t vacc2x0123 = vpaddq_f32(vacc2x01, vacc2x23);
float32x4_t vacc3x0123 = vpaddq_f32(vacc3x01, vacc3x23);
#else
const float32x2_t vsum0x0 = vadd_f32(vget_low_f32(vacc0x0), vget_high_f32(vacc0x0));
const float32x2_t vsum1x0 = vadd_f32(vget_low_f32(vacc1x0), vget_high_f32(vacc1x0));
const float32x2_t vsum2x0 = vadd_f32(vget_low_f32(vacc2x0), vget_high_f32(vacc2x0));
const float32x2_t vsum3x0 = vadd_f32(vget_low_f32(vacc3x0), vget_high_f32(vacc3x0));
const float32x2_t vsum0x1 = vadd_f32(vget_low_f32(vacc0x1), vget_high_f32(vacc0x1));
const float32x2_t vsum1x1 = vadd_f32(vget_low_f32(vacc1x1), vget_high_f32(vacc1x1));
const float32x2_t vsum2x1 = vadd_f32(vget_low_f32(vacc2x1), vget_high_f32(vacc2x1));
const float32x2_t vsum3x1 = vadd_f32(vget_low_f32(vacc3x1), vget_high_f32(vacc3x1));
const float32x2_t vsum0x2 = vadd_f32(vget_low_f32(vacc0x2), vget_high_f32(vacc0x2));
const float32x2_t vsum1x2 = vadd_f32(vget_low_f32(vacc1x2), vget_high_f32(vacc1x2));
const float32x2_t vsum2x2 = vadd_f32(vget_low_f32(vacc2x2), vget_high_f32(vacc2x2));
const float32x2_t vsum3x2 = vadd_f32(vget_low_f32(vacc3x2), vget_high_f32(vacc3x2));
const float32x2_t vsum0x3 = vadd_f32(vget_low_f32(vacc0x3), vget_high_f32(vacc0x3));
const float32x2_t vsum1x3 = vadd_f32(vget_low_f32(vacc1x3), vget_high_f32(vacc1x3));
const float32x2_t vsum2x3 = vadd_f32(vget_low_f32(vacc2x3), vget_high_f32(vacc2x3));
const float32x2_t vsum3x3 = vadd_f32(vget_low_f32(vacc3x3), vget_high_f32(vacc3x3));
float32x4_t vacc0x0123 = vcombine_f32(vpadd_f32(vsum0x0, vsum0x1), vpadd_f32(vsum0x2, vsum0x3));
float32x4_t vacc1x0123 = vcombine_f32(vpadd_f32(vsum1x0, vsum1x1), vpadd_f32(vsum1x2, vsum1x3));
float32x4_t vacc2x0123 = vcombine_f32(vpadd_f32(vsum2x0, vsum2x1), vpadd_f32(vsum2x2, vsum2x3));
float32x4_t vacc3x0123 = vcombine_f32(vpadd_f32(vsum3x0, vsum3x1), vpadd_f32(vsum3x2, vsum3x3));
#endif
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
bfloat16x4_t vout0x0123 = vcvt_bf16_f32(vacc0x0123);
bfloat16x4_t vout1x0123 = vcvt_bf16_f32(vacc1x0123);
bfloat16x4_t vout2x0123 = vcvt_bf16_f32(vacc2x0123);
bfloat16x4_t vout3x0123 = vcvt_bf16_f32(vacc3x0123);
if XNN_LIKELY(nc >= 4) {
vst1_bf16(c0, vout0x0123);
c0 = (bfloat16_t*) ((uintptr_t) c0 + cn_stride);
vst1_bf16(c1, vout1x0123);
c1 = (bfloat16_t*) ((uintptr_t) c1 + cn_stride);
vst1_bf16(c2, vout2x0123);
c2 = (bfloat16_t*) ((uintptr_t) c2 + cn_stride);
vst1_bf16(c3, vout3x0123);
c3 = (bfloat16_t*) ((uintptr_t) c3 + cn_stride);
a0 = (const bfloat16_t*) ((uintptr_t) a0 - kc);
a1 = (const bfloat16_t*) ((uintptr_t) a1 - kc);
a2 = (const bfloat16_t*) ((uintptr_t) a2 - kc);
a3 = (const bfloat16_t*) ((uintptr_t) a3 - kc);
nc -= 4;
} else {
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_bf16(vout0x0123), 0); c0 += 2;
vst1_lane_u32((void*) c1, vreinterpret_u32_bf16(vout1x0123), 0); c1 += 2;
vst1_lane_u32((void*) c2, vreinterpret_u32_bf16(vout2x0123), 0); c2 += 2;
vst1_lane_u32((void*) c3, vreinterpret_u32_bf16(vout3x0123), 0); c3 += 2;
vout0x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout0x0123), vreinterpret_u16_bf16(vout0x0123), 2));
vout1x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout1x0123), vreinterpret_u16_bf16(vout1x0123), 2));
vout2x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout2x0123), vreinterpret_u16_bf16(vout2x0123), 2));
vout3x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout3x0123), vreinterpret_u16_bf16(vout3x0123), 2));
}
if (nc & 1) {
vst1_lane_bf16(c0, vout0x0123, 0);
vst1_lane_bf16(c1, vout1x0123, 0);
vst1_lane_bf16(c2, vout2x0123, 0);
vst1_lane_bf16(c3, vout3x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 13,950 | 47.950877 | 126 | c |
| XNNPACK | XNNPACK-master/src/bf16-gemm/gen/bf16-gemm-4x4c8-minmax-neonfma-shland.c |
// Auto-generated file. Do not edit!
// Template: src/bf16-gemm/c8-neon-shland.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_bf16_gemm_minmax_ukernel_4x4c8__neonfma_shland(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w_ptr,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w_ptr != NULL);
assert(c != NULL);
const uint16_t* a0 = (const uint16_t*) a;
uint16_t* c0 = (uint16_t*) c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const uint16_t* w = (const uint16_t*) w_ptr;
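  // vmask keeps the upper 16 bits of every 32-bit lane; with the little-endian lane
  // layout, odd-indexed bf16 elements already sit in that position, so AND-ing with it
  // yields their f32 bit patterns directly.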
const uint16x8_t vmask = vreinterpretq_u16_u32(vmovq_n_u32(UINT32_C(0xFFFF0000)));
do {
float32x4_t vacc0x0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc0x1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc0x2 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc0x3 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc1x0 = vacc0x0;
float32x4_t vacc1x1 = vacc0x1;
float32x4_t vacc1x2 = vacc0x2;
float32x4_t vacc1x3 = vacc0x3;
float32x4_t vacc2x0 = vacc0x0;
float32x4_t vacc2x1 = vacc0x1;
float32x4_t vacc2x2 = vacc0x2;
float32x4_t vacc2x3 = vacc0x3;
float32x4_t vacc3x0 = vacc0x0;
float32x4_t vacc3x1 = vacc0x1;
float32x4_t vacc3x2 = vacc0x2;
float32x4_t vacc3x3 = vacc0x3;
size_t k = kc;
for (; k >= 8 * sizeof(uint16_t); k -= 8 * sizeof(uint16_t)) {
const uint16x8_t va0 = vld1q_u16(a0); a0 += 8;
const uint16x8_t va1 = vld1q_u16(a1); a1 += 8;
const uint16x8_t va2 = vld1q_u16(a2); a2 += 8;
const uint16x8_t va3 = vld1q_u16(a3); a3 += 8;
const uint16x8_t vb0 = vld1q_u16(w); w += 8;
const uint16x8_t vb1 = vld1q_u16(w); w += 8;
const uint16x8_t vb2 = vld1q_u16(w); w += 8;
const uint16x8_t vb3 = vld1q_u16(w); w += 8;
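      // Even-indexed bf16 elements become f32 by shifting each 32-bit lane left by 16;
      // the odd-indexed elements are handled below by masking with vmask.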
const float32x4_t va0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va0), 16));
const float32x4_t va1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va1), 16));
const float32x4_t va2e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va2), 16));
const float32x4_t va3e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va3), 16));
const float32x4_t vb0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb0), 16));
const float32x4_t vb1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb1), 16));
const float32x4_t vb2e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb2), 16));
const float32x4_t vb3e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb3), 16));
vacc0x0 = vfmaq_f32(vacc0x0, va0e, vb0e);
vacc1x0 = vfmaq_f32(vacc1x0, va1e, vb0e);
vacc2x0 = vfmaq_f32(vacc2x0, va2e, vb0e);
vacc3x0 = vfmaq_f32(vacc3x0, va3e, vb0e);
vacc0x1 = vfmaq_f32(vacc0x1, va0e, vb1e);
vacc1x1 = vfmaq_f32(vacc1x1, va1e, vb1e);
vacc2x1 = vfmaq_f32(vacc2x1, va2e, vb1e);
vacc3x1 = vfmaq_f32(vacc3x1, va3e, vb1e);
vacc0x2 = vfmaq_f32(vacc0x2, va0e, vb2e);
vacc1x2 = vfmaq_f32(vacc1x2, va1e, vb2e);
vacc2x2 = vfmaq_f32(vacc2x2, va2e, vb2e);
vacc3x2 = vfmaq_f32(vacc3x2, va3e, vb2e);
vacc0x3 = vfmaq_f32(vacc0x3, va0e, vb3e);
vacc1x3 = vfmaq_f32(vacc1x3, va1e, vb3e);
vacc2x3 = vfmaq_f32(vacc2x3, va2e, vb3e);
vacc3x3 = vfmaq_f32(vacc3x3, va3e, vb3e);
const float32x4_t va0o = vreinterpretq_f32_u16(vandq_u16(va0, vmask));
const float32x4_t va1o = vreinterpretq_f32_u16(vandq_u16(va1, vmask));
const float32x4_t va2o = vreinterpretq_f32_u16(vandq_u16(va2, vmask));
const float32x4_t va3o = vreinterpretq_f32_u16(vandq_u16(va3, vmask));
const float32x4_t vb0o = vreinterpretq_f32_u16(vandq_u16(vb0, vmask));
const float32x4_t vb1o = vreinterpretq_f32_u16(vandq_u16(vb1, vmask));
const float32x4_t vb2o = vreinterpretq_f32_u16(vandq_u16(vb2, vmask));
const float32x4_t vb3o = vreinterpretq_f32_u16(vandq_u16(vb3, vmask));
vacc0x0 = vfmaq_f32(vacc0x0, va0o, vb0o);
vacc1x0 = vfmaq_f32(vacc1x0, va1o, vb0o);
vacc2x0 = vfmaq_f32(vacc2x0, va2o, vb0o);
vacc3x0 = vfmaq_f32(vacc3x0, va3o, vb0o);
vacc0x1 = vfmaq_f32(vacc0x1, va0o, vb1o);
vacc1x1 = vfmaq_f32(vacc1x1, va1o, vb1o);
vacc2x1 = vfmaq_f32(vacc2x1, va2o, vb1o);
vacc3x1 = vfmaq_f32(vacc3x1, va3o, vb1o);
vacc0x2 = vfmaq_f32(vacc0x2, va0o, vb2o);
vacc1x2 = vfmaq_f32(vacc1x2, va1o, vb2o);
vacc2x2 = vfmaq_f32(vacc2x2, va2o, vb2o);
vacc3x2 = vfmaq_f32(vacc3x2, va3o, vb2o);
vacc0x3 = vfmaq_f32(vacc0x3, va0o, vb3o);
vacc1x3 = vfmaq_f32(vacc1x3, va1o, vb3o);
vacc2x3 = vfmaq_f32(vacc2x3, va2o, vb3o);
vacc3x3 = vfmaq_f32(vacc3x3, va3o, vb3o);
}
if XNN_UNLIKELY(k != 0) {
const uint16x8_t va0 = vld1q_u16(a0); a0 = (const uint16_t*) ((uintptr_t) a0 + k);
const uint16x8_t va1 = vld1q_u16(a1); a1 = (const uint16_t*) ((uintptr_t) a1 + k);
const uint16x8_t va2 = vld1q_u16(a2); a2 = (const uint16_t*) ((uintptr_t) a2 + k);
const uint16x8_t va3 = vld1q_u16(a3); a3 = (const uint16_t*) ((uintptr_t) a3 + k);
const uint16x8_t vb0 = vld1q_u16(w); w += 8;
const uint16x8_t vb1 = vld1q_u16(w); w += 8;
const uint16x8_t vb2 = vld1q_u16(w); w += 8;
const uint16x8_t vb3 = vld1q_u16(w); w += 8;
const uint16x8_t vm0 = vceqq_u16(vb0, vmovq_n_u16(0));
const uint16x8_t vm1 = vceqq_u16(vb1, vmovq_n_u16(0));
const uint16x8_t vm2 = vceqq_u16(vb2, vmovq_n_u16(0));
const uint16x8_t vm3 = vceqq_u16(vb3, vmovq_n_u16(0));
const float32x4_t vb0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb0), 16));
const float32x4_t vb1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb1), 16));
const float32x4_t vb2e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb2), 16));
const float32x4_t vb3e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(vb3), 16));
const uint16x8_t va0x0 = vbicq_u16(va0, vm0);
const uint16x8_t va1x0 = vbicq_u16(va1, vm0);
const uint16x8_t va2x0 = vbicq_u16(va2, vm0);
const uint16x8_t va3x0 = vbicq_u16(va3, vm0);
const uint16x8_t va0x1 = vbicq_u16(va0, vm1);
const uint16x8_t va1x1 = vbicq_u16(va1, vm1);
const uint16x8_t va2x1 = vbicq_u16(va2, vm1);
const uint16x8_t va3x1 = vbicq_u16(va3, vm1);
const uint16x8_t va0x2 = vbicq_u16(va0, vm2);
const uint16x8_t va1x2 = vbicq_u16(va1, vm2);
const uint16x8_t va2x2 = vbicq_u16(va2, vm2);
const uint16x8_t va3x2 = vbicq_u16(va3, vm2);
const uint16x8_t va0x3 = vbicq_u16(va0, vm3);
const uint16x8_t va1x3 = vbicq_u16(va1, vm3);
const uint16x8_t va2x3 = vbicq_u16(va2, vm3);
const uint16x8_t va3x3 = vbicq_u16(va3, vm3);
const float32x4_t va0x0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va0x0), 16));
const float32x4_t va1x0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va1x0), 16));
const float32x4_t va2x0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va2x0), 16));
const float32x4_t va3x0e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va3x0), 16));
const float32x4_t va0x1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va0x1), 16));
const float32x4_t va1x1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va1x1), 16));
const float32x4_t va2x1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va2x1), 16));
const float32x4_t va3x1e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va3x1), 16));
const float32x4_t va0x2e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va0x2), 16));
const float32x4_t va1x2e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va1x2), 16));
const float32x4_t va2x2e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va2x2), 16));
const float32x4_t va3x2e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va3x2), 16));
const float32x4_t va0x3e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va0x3), 16));
const float32x4_t va1x3e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va1x3), 16));
const float32x4_t va2x3e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va2x3), 16));
const float32x4_t va3x3e = vreinterpretq_f32_u32(vshlq_n_u32(vreinterpretq_u32_u16(va3x3), 16));
vacc0x0 = vfmaq_f32(vacc0x0, va0x0e, vb0e);
vacc1x0 = vfmaq_f32(vacc1x0, va1x0e, vb0e);
vacc2x0 = vfmaq_f32(vacc2x0, va2x0e, vb0e);
vacc3x0 = vfmaq_f32(vacc3x0, va3x0e, vb0e);
vacc0x1 = vfmaq_f32(vacc0x1, va0x1e, vb1e);
vacc1x1 = vfmaq_f32(vacc1x1, va1x1e, vb1e);
vacc2x1 = vfmaq_f32(vacc2x1, va2x1e, vb1e);
vacc3x1 = vfmaq_f32(vacc3x1, va3x1e, vb1e);
vacc0x2 = vfmaq_f32(vacc0x2, va0x2e, vb2e);
vacc1x2 = vfmaq_f32(vacc1x2, va1x2e, vb2e);
vacc2x2 = vfmaq_f32(vacc2x2, va2x2e, vb2e);
vacc3x2 = vfmaq_f32(vacc3x2, va3x2e, vb2e);
vacc0x3 = vfmaq_f32(vacc0x3, va0x3e, vb3e);
vacc1x3 = vfmaq_f32(vacc1x3, va1x3e, vb3e);
vacc2x3 = vfmaq_f32(vacc2x3, va2x3e, vb3e);
vacc3x3 = vfmaq_f32(vacc3x3, va3x3e, vb3e);
const float32x4_t vb0o = vreinterpretq_f32_u16(vandq_u16(vb0, vmask));
const float32x4_t vb1o = vreinterpretq_f32_u16(vandq_u16(vb1, vmask));
const float32x4_t vb2o = vreinterpretq_f32_u16(vandq_u16(vb2, vmask));
const float32x4_t vb3o = vreinterpretq_f32_u16(vandq_u16(vb3, vmask));
const float32x4_t va0x0o = vreinterpretq_f32_u16(vandq_u16(va0x0, vmask));
const float32x4_t va1x0o = vreinterpretq_f32_u16(vandq_u16(va1x0, vmask));
const float32x4_t va2x0o = vreinterpretq_f32_u16(vandq_u16(va2x0, vmask));
const float32x4_t va3x0o = vreinterpretq_f32_u16(vandq_u16(va3x0, vmask));
const float32x4_t va0x1o = vreinterpretq_f32_u16(vandq_u16(va0x1, vmask));
const float32x4_t va1x1o = vreinterpretq_f32_u16(vandq_u16(va1x1, vmask));
const float32x4_t va2x1o = vreinterpretq_f32_u16(vandq_u16(va2x1, vmask));
const float32x4_t va3x1o = vreinterpretq_f32_u16(vandq_u16(va3x1, vmask));
const float32x4_t va0x2o = vreinterpretq_f32_u16(vandq_u16(va0x2, vmask));
const float32x4_t va1x2o = vreinterpretq_f32_u16(vandq_u16(va1x2, vmask));
const float32x4_t va2x2o = vreinterpretq_f32_u16(vandq_u16(va2x2, vmask));
const float32x4_t va3x2o = vreinterpretq_f32_u16(vandq_u16(va3x2, vmask));
const float32x4_t va0x3o = vreinterpretq_f32_u16(vandq_u16(va0x3, vmask));
const float32x4_t va1x3o = vreinterpretq_f32_u16(vandq_u16(va1x3, vmask));
const float32x4_t va2x3o = vreinterpretq_f32_u16(vandq_u16(va2x3, vmask));
const float32x4_t va3x3o = vreinterpretq_f32_u16(vandq_u16(va3x3, vmask));
vacc0x0 = vfmaq_f32(vacc0x0, va0x0o, vb0o);
vacc1x0 = vfmaq_f32(vacc1x0, va1x0o, vb0o);
vacc2x0 = vfmaq_f32(vacc2x0, va2x0o, vb0o);
vacc3x0 = vfmaq_f32(vacc3x0, va3x0o, vb0o);
vacc0x1 = vfmaq_f32(vacc0x1, va0x1o, vb1o);
vacc1x1 = vfmaq_f32(vacc1x1, va1x1o, vb1o);
vacc2x1 = vfmaq_f32(vacc2x1, va2x1o, vb1o);
vacc3x1 = vfmaq_f32(vacc3x1, va3x1o, vb1o);
vacc0x2 = vfmaq_f32(vacc0x2, va0x2o, vb2o);
vacc1x2 = vfmaq_f32(vacc1x2, va1x2o, vb2o);
vacc2x2 = vfmaq_f32(vacc2x2, va2x2o, vb2o);
vacc3x2 = vfmaq_f32(vacc3x2, va3x2o, vb2o);
vacc0x3 = vfmaq_f32(vacc0x3, va0x3o, vb3o);
vacc1x3 = vfmaq_f32(vacc1x3, va1x3o, vb3o);
vacc2x3 = vfmaq_f32(vacc2x3, va2x3o, vb3o);
vacc3x3 = vfmaq_f32(vacc3x3, va3x3o, vb3o);
}
#if XNN_ARCH_ARM64
const float32x4_t vacc0x01 = vpaddq_f32(vacc0x0, vacc0x1);
const float32x4_t vacc1x01 = vpaddq_f32(vacc1x0, vacc1x1);
const float32x4_t vacc2x01 = vpaddq_f32(vacc2x0, vacc2x1);
const float32x4_t vacc3x01 = vpaddq_f32(vacc3x0, vacc3x1);
const float32x4_t vacc0x23 = vpaddq_f32(vacc0x2, vacc0x3);
const float32x4_t vacc1x23 = vpaddq_f32(vacc1x2, vacc1x3);
const float32x4_t vacc2x23 = vpaddq_f32(vacc2x2, vacc2x3);
const float32x4_t vacc3x23 = vpaddq_f32(vacc3x2, vacc3x3);
float32x4_t vacc0x0123 = vpaddq_f32(vacc0x01, vacc0x23);
float32x4_t vacc1x0123 = vpaddq_f32(vacc1x01, vacc1x23);
float32x4_t vacc2x0123 = vpaddq_f32(vacc2x01, vacc2x23);
float32x4_t vacc3x0123 = vpaddq_f32(vacc3x01, vacc3x23);
#else
const float32x2_t vsum0x0 = vadd_f32(vget_low_f32(vacc0x0), vget_high_f32(vacc0x0));
const float32x2_t vsum1x0 = vadd_f32(vget_low_f32(vacc1x0), vget_high_f32(vacc1x0));
const float32x2_t vsum2x0 = vadd_f32(vget_low_f32(vacc2x0), vget_high_f32(vacc2x0));
const float32x2_t vsum3x0 = vadd_f32(vget_low_f32(vacc3x0), vget_high_f32(vacc3x0));
const float32x2_t vsum0x1 = vadd_f32(vget_low_f32(vacc0x1), vget_high_f32(vacc0x1));
const float32x2_t vsum1x1 = vadd_f32(vget_low_f32(vacc1x1), vget_high_f32(vacc1x1));
const float32x2_t vsum2x1 = vadd_f32(vget_low_f32(vacc2x1), vget_high_f32(vacc2x1));
const float32x2_t vsum3x1 = vadd_f32(vget_low_f32(vacc3x1), vget_high_f32(vacc3x1));
const float32x2_t vsum0x2 = vadd_f32(vget_low_f32(vacc0x2), vget_high_f32(vacc0x2));
const float32x2_t vsum1x2 = vadd_f32(vget_low_f32(vacc1x2), vget_high_f32(vacc1x2));
const float32x2_t vsum2x2 = vadd_f32(vget_low_f32(vacc2x2), vget_high_f32(vacc2x2));
const float32x2_t vsum3x2 = vadd_f32(vget_low_f32(vacc3x2), vget_high_f32(vacc3x2));
const float32x2_t vsum0x3 = vadd_f32(vget_low_f32(vacc0x3), vget_high_f32(vacc0x3));
const float32x2_t vsum1x3 = vadd_f32(vget_low_f32(vacc1x3), vget_high_f32(vacc1x3));
const float32x2_t vsum2x3 = vadd_f32(vget_low_f32(vacc2x3), vget_high_f32(vacc2x3));
const float32x2_t vsum3x3 = vadd_f32(vget_low_f32(vacc3x3), vget_high_f32(vacc3x3));
float32x4_t vacc0x0123 = vcombine_f32(vpadd_f32(vsum0x0, vsum0x1), vpadd_f32(vsum0x2, vsum0x3));
float32x4_t vacc1x0123 = vcombine_f32(vpadd_f32(vsum1x0, vsum1x1), vpadd_f32(vsum1x2, vsum1x3));
float32x4_t vacc2x0123 = vcombine_f32(vpadd_f32(vsum2x0, vsum2x1), vpadd_f32(vsum2x2, vsum2x3));
float32x4_t vacc3x0123 = vcombine_f32(vpadd_f32(vsum3x0, vsum3x1), vpadd_f32(vsum3x2, vsum3x3));
#endif
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
uint16x4_t vout0x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc0x0123), 16);
uint16x4_t vout1x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc1x0123), 16);
uint16x4_t vout2x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc2x0123), 16);
uint16x4_t vout3x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc3x0123), 16);
if XNN_LIKELY(nc >= 4) {
vst1_u16(c0, vout0x0123);
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
vst1_u16(c1, vout1x0123);
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
vst1_u16(c2, vout2x0123);
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
vst1_u16(c3, vout3x0123);
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
nc -= 4;
} else {
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_u16(vout0x0123), 0); c0 += 2;
vst1_lane_u32((void*) c1, vreinterpret_u32_u16(vout1x0123), 0); c1 += 2;
vst1_lane_u32((void*) c2, vreinterpret_u32_u16(vout2x0123), 0); c2 += 2;
vst1_lane_u32((void*) c3, vreinterpret_u32_u16(vout3x0123), 0); c3 += 2;
vout0x0123 = vext_u16(vout0x0123, vout0x0123, 2);
vout1x0123 = vext_u16(vout1x0123, vout1x0123, 2);
vout2x0123 = vext_u16(vout2x0123, vout2x0123, 2);
vout3x0123 = vext_u16(vout3x0123, vout3x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c0, vout0x0123, 0);
vst1_lane_u16(c1, vout1x0123, 0);
vst1_lane_u16(c2, vout2x0123, 0);
vst1_lane_u16(c3, vout3x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 17,773 | 49.494318 | 109 | c |
| XNNPACK | XNNPACK-master/src/bf16-gemm/gen/bf16-gemm-4x4c8-minmax-neonfma-zip.c |
// Auto-generated file. Do not edit!
// Template: src/bf16-gemm/c8-neon-zip.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
void xnn_bf16_gemm_minmax_ukernel_4x4c8__neonfma_zip(
size_t mr,
size_t nc,
size_t kc,
const void* restrict a,
size_t a_stride,
const void* restrict w_ptr,
void* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(uint16_t) == 0);
assert(a != NULL);
assert(w_ptr != NULL);
assert(c != NULL);
const uint16_t* a0 = (const uint16_t*) a;
uint16_t* c0 = (uint16_t*) c;
const uint16_t* a1 = (const uint16_t*) ((uintptr_t) a0 + a_stride);
uint16_t* c1 = (uint16_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
a1 = a0;
c1 = c0;
}
const uint16_t* a2 = (const uint16_t*) ((uintptr_t) a1 + a_stride);
uint16_t* c2 = (uint16_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
a2 = a1;
c2 = c1;
}
const uint16_t* a3 = (const uint16_t*) ((uintptr_t) a2 + a_stride);
uint16_t* c3 = (uint16_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
a3 = a2;
c3 = c2;
}
const uint16_t* w = (const uint16_t*) w_ptr;
const uint16x8_t vzero = vmovq_n_u16(0);
do {
float32x4_t vacc0x0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc0x1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc0x2 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc0x3 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
float32x4_t vacc1x0 = vacc0x0;
float32x4_t vacc1x1 = vacc0x1;
float32x4_t vacc1x2 = vacc0x2;
float32x4_t vacc1x3 = vacc0x3;
float32x4_t vacc2x0 = vacc0x0;
float32x4_t vacc2x1 = vacc0x1;
float32x4_t vacc2x2 = vacc0x2;
float32x4_t vacc2x3 = vacc0x3;
float32x4_t vacc3x0 = vacc0x0;
float32x4_t vacc3x1 = vacc0x1;
float32x4_t vacc3x2 = vacc0x2;
float32x4_t vacc3x3 = vacc0x3;
size_t k = kc;
for (; k >= 8 * sizeof(uint16_t); k -= 8 * sizeof(uint16_t)) {
const uint16x8_t va0h = vld1q_u16(a0); a0 += 8;
const uint16x8_t va1h = vld1q_u16(a1); a1 += 8;
const uint16x8_t va2h = vld1q_u16(a2); a2 += 8;
const uint16x8_t va3h = vld1q_u16(a3); a3 += 8;
const uint16x8_t vb0h = vld1q_u16(w); w += 8;
const uint16x8_t vb1h = vld1q_u16(w); w += 8;
const uint16x8_t vb2h = vld1q_u16(w); w += 8;
const uint16x8_t vb3h = vld1q_u16(w); w += 8;
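      // Zipping with a zero vector inserts a zero 16-bit low half beneath each bf16
      // element, producing its f32 bit pattern; val[0] covers the first four elements
      // and val[1] the last four.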
const uint16x8x2_t va0f = vzipq_u16(vzero, va0h);
const uint16x8x2_t va1f = vzipq_u16(vzero, va1h);
const uint16x8x2_t va2f = vzipq_u16(vzero, va2h);
const uint16x8x2_t va3f = vzipq_u16(vzero, va3h);
const uint16x8x2_t vb0f = vzipq_u16(vzero, vb0h);
const uint16x8x2_t vb1f = vzipq_u16(vzero, vb1h);
const uint16x8x2_t vb2f = vzipq_u16(vzero, vb2h);
const uint16x8x2_t vb3f = vzipq_u16(vzero, vb3h);
vacc0x0 = vfmaq_f32(vacc0x0, vreinterpretq_f32_u16(va0f.val[0]), vreinterpretq_f32_u16(vb0f.val[0]));
vacc1x0 = vfmaq_f32(vacc1x0, vreinterpretq_f32_u16(va1f.val[0]), vreinterpretq_f32_u16(vb0f.val[0]));
vacc2x0 = vfmaq_f32(vacc2x0, vreinterpretq_f32_u16(va2f.val[0]), vreinterpretq_f32_u16(vb0f.val[0]));
vacc3x0 = vfmaq_f32(vacc3x0, vreinterpretq_f32_u16(va3f.val[0]), vreinterpretq_f32_u16(vb0f.val[0]));
vacc0x1 = vfmaq_f32(vacc0x1, vreinterpretq_f32_u16(va0f.val[0]), vreinterpretq_f32_u16(vb1f.val[0]));
vacc1x1 = vfmaq_f32(vacc1x1, vreinterpretq_f32_u16(va1f.val[0]), vreinterpretq_f32_u16(vb1f.val[0]));
vacc2x1 = vfmaq_f32(vacc2x1, vreinterpretq_f32_u16(va2f.val[0]), vreinterpretq_f32_u16(vb1f.val[0]));
vacc3x1 = vfmaq_f32(vacc3x1, vreinterpretq_f32_u16(va3f.val[0]), vreinterpretq_f32_u16(vb1f.val[0]));
vacc0x2 = vfmaq_f32(vacc0x2, vreinterpretq_f32_u16(va0f.val[0]), vreinterpretq_f32_u16(vb2f.val[0]));
vacc1x2 = vfmaq_f32(vacc1x2, vreinterpretq_f32_u16(va1f.val[0]), vreinterpretq_f32_u16(vb2f.val[0]));
vacc2x2 = vfmaq_f32(vacc2x2, vreinterpretq_f32_u16(va2f.val[0]), vreinterpretq_f32_u16(vb2f.val[0]));
vacc3x2 = vfmaq_f32(vacc3x2, vreinterpretq_f32_u16(va3f.val[0]), vreinterpretq_f32_u16(vb2f.val[0]));
vacc0x3 = vfmaq_f32(vacc0x3, vreinterpretq_f32_u16(va0f.val[0]), vreinterpretq_f32_u16(vb3f.val[0]));
vacc1x3 = vfmaq_f32(vacc1x3, vreinterpretq_f32_u16(va1f.val[0]), vreinterpretq_f32_u16(vb3f.val[0]));
vacc2x3 = vfmaq_f32(vacc2x3, vreinterpretq_f32_u16(va2f.val[0]), vreinterpretq_f32_u16(vb3f.val[0]));
vacc3x3 = vfmaq_f32(vacc3x3, vreinterpretq_f32_u16(va3f.val[0]), vreinterpretq_f32_u16(vb3f.val[0]));
vacc0x0 = vfmaq_f32(vacc0x0, vreinterpretq_f32_u16(va0f.val[1]), vreinterpretq_f32_u16(vb0f.val[1]));
vacc1x0 = vfmaq_f32(vacc1x0, vreinterpretq_f32_u16(va1f.val[1]), vreinterpretq_f32_u16(vb0f.val[1]));
vacc2x0 = vfmaq_f32(vacc2x0, vreinterpretq_f32_u16(va2f.val[1]), vreinterpretq_f32_u16(vb0f.val[1]));
vacc3x0 = vfmaq_f32(vacc3x0, vreinterpretq_f32_u16(va3f.val[1]), vreinterpretq_f32_u16(vb0f.val[1]));
vacc0x1 = vfmaq_f32(vacc0x1, vreinterpretq_f32_u16(va0f.val[1]), vreinterpretq_f32_u16(vb1f.val[1]));
vacc1x1 = vfmaq_f32(vacc1x1, vreinterpretq_f32_u16(va1f.val[1]), vreinterpretq_f32_u16(vb1f.val[1]));
vacc2x1 = vfmaq_f32(vacc2x1, vreinterpretq_f32_u16(va2f.val[1]), vreinterpretq_f32_u16(vb1f.val[1]));
vacc3x1 = vfmaq_f32(vacc3x1, vreinterpretq_f32_u16(va3f.val[1]), vreinterpretq_f32_u16(vb1f.val[1]));
vacc0x2 = vfmaq_f32(vacc0x2, vreinterpretq_f32_u16(va0f.val[1]), vreinterpretq_f32_u16(vb2f.val[1]));
vacc1x2 = vfmaq_f32(vacc1x2, vreinterpretq_f32_u16(va1f.val[1]), vreinterpretq_f32_u16(vb2f.val[1]));
vacc2x2 = vfmaq_f32(vacc2x2, vreinterpretq_f32_u16(va2f.val[1]), vreinterpretq_f32_u16(vb2f.val[1]));
vacc3x2 = vfmaq_f32(vacc3x2, vreinterpretq_f32_u16(va3f.val[1]), vreinterpretq_f32_u16(vb2f.val[1]));
vacc0x3 = vfmaq_f32(vacc0x3, vreinterpretq_f32_u16(va0f.val[1]), vreinterpretq_f32_u16(vb3f.val[1]));
vacc1x3 = vfmaq_f32(vacc1x3, vreinterpretq_f32_u16(va1f.val[1]), vreinterpretq_f32_u16(vb3f.val[1]));
vacc2x3 = vfmaq_f32(vacc2x3, vreinterpretq_f32_u16(va2f.val[1]), vreinterpretq_f32_u16(vb3f.val[1]));
vacc3x3 = vfmaq_f32(vacc3x3, vreinterpretq_f32_u16(va3f.val[1]), vreinterpretq_f32_u16(vb3f.val[1]));
}
if XNN_UNLIKELY(k != 0) {
const uint16x8_t va0h = vld1q_u16(a0); a0 = (const uint16_t*) ((uintptr_t) a0 + k);
const uint16x8_t va1h = vld1q_u16(a1); a1 = (const uint16_t*) ((uintptr_t) a1 + k);
const uint16x8_t va2h = vld1q_u16(a2); a2 = (const uint16_t*) ((uintptr_t) a2 + k);
const uint16x8_t va3h = vld1q_u16(a3); a3 = (const uint16_t*) ((uintptr_t) a3 + k);
const uint16x8_t vb0h = vld1q_u16(w); w += 8;
const uint16x8_t vb1h = vld1q_u16(w); w += 8;
const uint16x8_t vb2h = vld1q_u16(w); w += 8;
const uint16x8_t vb3h = vld1q_u16(w); w += 8;
const uint16x8_t vm0h = vceqq_u16(vb0h, vmovq_n_u16(0));
const uint16x8_t vm1h = vceqq_u16(vb1h, vmovq_n_u16(0));
const uint16x8_t vm2h = vceqq_u16(vb2h, vmovq_n_u16(0));
const uint16x8_t vm3h = vceqq_u16(vb3h, vmovq_n_u16(0));
const uint16x8x2_t vb0f = vzipq_u16(vzero, vb0h);
const uint16x8x2_t vb1f = vzipq_u16(vzero, vb1h);
const uint16x8x2_t vb2f = vzipq_u16(vzero, vb2h);
const uint16x8x2_t vb3f = vzipq_u16(vzero, vb3h);
const uint16x8_t va0x0h = vbicq_u16(va0h, vm0h);
const uint16x8_t va1x0h = vbicq_u16(va1h, vm0h);
const uint16x8_t va2x0h = vbicq_u16(va2h, vm0h);
const uint16x8_t va3x0h = vbicq_u16(va3h, vm0h);
const uint16x8_t va0x1h = vbicq_u16(va0h, vm1h);
const uint16x8_t va1x1h = vbicq_u16(va1h, vm1h);
const uint16x8_t va2x1h = vbicq_u16(va2h, vm1h);
const uint16x8_t va3x1h = vbicq_u16(va3h, vm1h);
const uint16x8_t va0x2h = vbicq_u16(va0h, vm2h);
const uint16x8_t va1x2h = vbicq_u16(va1h, vm2h);
const uint16x8_t va2x2h = vbicq_u16(va2h, vm2h);
const uint16x8_t va3x2h = vbicq_u16(va3h, vm2h);
const uint16x8_t va0x3h = vbicq_u16(va0h, vm3h);
const uint16x8_t va1x3h = vbicq_u16(va1h, vm3h);
const uint16x8_t va2x3h = vbicq_u16(va2h, vm3h);
const uint16x8_t va3x3h = vbicq_u16(va3h, vm3h);
const uint16x8x2_t va0x0f = vzipq_u16(vzero, va0x0h);
const uint16x8x2_t va1x0f = vzipq_u16(vzero, va1x0h);
const uint16x8x2_t va2x0f = vzipq_u16(vzero, va2x0h);
const uint16x8x2_t va3x0f = vzipq_u16(vzero, va3x0h);
const uint16x8x2_t va0x1f = vzipq_u16(vzero, va0x1h);
const uint16x8x2_t va1x1f = vzipq_u16(vzero, va1x1h);
const uint16x8x2_t va2x1f = vzipq_u16(vzero, va2x1h);
const uint16x8x2_t va3x1f = vzipq_u16(vzero, va3x1h);
const uint16x8x2_t va0x2f = vzipq_u16(vzero, va0x2h);
const uint16x8x2_t va1x2f = vzipq_u16(vzero, va1x2h);
const uint16x8x2_t va2x2f = vzipq_u16(vzero, va2x2h);
const uint16x8x2_t va3x2f = vzipq_u16(vzero, va3x2h);
const uint16x8x2_t va0x3f = vzipq_u16(vzero, va0x3h);
const uint16x8x2_t va1x3f = vzipq_u16(vzero, va1x3h);
const uint16x8x2_t va2x3f = vzipq_u16(vzero, va2x3h);
const uint16x8x2_t va3x3f = vzipq_u16(vzero, va3x3h);
vacc0x0 = vfmaq_f32(vacc0x0, vreinterpretq_f32_u16(va0x0f.val[0]), vreinterpretq_f32_u16(vb0f.val[0]));
vacc1x0 = vfmaq_f32(vacc1x0, vreinterpretq_f32_u16(va1x0f.val[0]), vreinterpretq_f32_u16(vb0f.val[0]));
vacc2x0 = vfmaq_f32(vacc2x0, vreinterpretq_f32_u16(va2x0f.val[0]), vreinterpretq_f32_u16(vb0f.val[0]));
vacc3x0 = vfmaq_f32(vacc3x0, vreinterpretq_f32_u16(va3x0f.val[0]), vreinterpretq_f32_u16(vb0f.val[0]));
vacc0x1 = vfmaq_f32(vacc0x1, vreinterpretq_f32_u16(va0x1f.val[0]), vreinterpretq_f32_u16(vb1f.val[0]));
vacc1x1 = vfmaq_f32(vacc1x1, vreinterpretq_f32_u16(va1x1f.val[0]), vreinterpretq_f32_u16(vb1f.val[0]));
vacc2x1 = vfmaq_f32(vacc2x1, vreinterpretq_f32_u16(va2x1f.val[0]), vreinterpretq_f32_u16(vb1f.val[0]));
vacc3x1 = vfmaq_f32(vacc3x1, vreinterpretq_f32_u16(va3x1f.val[0]), vreinterpretq_f32_u16(vb1f.val[0]));
vacc0x2 = vfmaq_f32(vacc0x2, vreinterpretq_f32_u16(va0x2f.val[0]), vreinterpretq_f32_u16(vb2f.val[0]));
vacc1x2 = vfmaq_f32(vacc1x2, vreinterpretq_f32_u16(va1x2f.val[0]), vreinterpretq_f32_u16(vb2f.val[0]));
vacc2x2 = vfmaq_f32(vacc2x2, vreinterpretq_f32_u16(va2x2f.val[0]), vreinterpretq_f32_u16(vb2f.val[0]));
vacc3x2 = vfmaq_f32(vacc3x2, vreinterpretq_f32_u16(va3x2f.val[0]), vreinterpretq_f32_u16(vb2f.val[0]));
vacc0x3 = vfmaq_f32(vacc0x3, vreinterpretq_f32_u16(va0x3f.val[0]), vreinterpretq_f32_u16(vb3f.val[0]));
vacc1x3 = vfmaq_f32(vacc1x3, vreinterpretq_f32_u16(va1x3f.val[0]), vreinterpretq_f32_u16(vb3f.val[0]));
vacc2x3 = vfmaq_f32(vacc2x3, vreinterpretq_f32_u16(va2x3f.val[0]), vreinterpretq_f32_u16(vb3f.val[0]));
vacc3x3 = vfmaq_f32(vacc3x3, vreinterpretq_f32_u16(va3x3f.val[0]), vreinterpretq_f32_u16(vb3f.val[0]));
vacc0x0 = vfmaq_f32(vacc0x0, vreinterpretq_f32_u16(va0x0f.val[1]), vreinterpretq_f32_u16(vb0f.val[1]));
vacc1x0 = vfmaq_f32(vacc1x0, vreinterpretq_f32_u16(va1x0f.val[1]), vreinterpretq_f32_u16(vb0f.val[1]));
vacc2x0 = vfmaq_f32(vacc2x0, vreinterpretq_f32_u16(va2x0f.val[1]), vreinterpretq_f32_u16(vb0f.val[1]));
vacc3x0 = vfmaq_f32(vacc3x0, vreinterpretq_f32_u16(va3x0f.val[1]), vreinterpretq_f32_u16(vb0f.val[1]));
vacc0x1 = vfmaq_f32(vacc0x1, vreinterpretq_f32_u16(va0x1f.val[1]), vreinterpretq_f32_u16(vb1f.val[1]));
vacc1x1 = vfmaq_f32(vacc1x1, vreinterpretq_f32_u16(va1x1f.val[1]), vreinterpretq_f32_u16(vb1f.val[1]));
vacc2x1 = vfmaq_f32(vacc2x1, vreinterpretq_f32_u16(va2x1f.val[1]), vreinterpretq_f32_u16(vb1f.val[1]));
vacc3x1 = vfmaq_f32(vacc3x1, vreinterpretq_f32_u16(va3x1f.val[1]), vreinterpretq_f32_u16(vb1f.val[1]));
vacc0x2 = vfmaq_f32(vacc0x2, vreinterpretq_f32_u16(va0x2f.val[1]), vreinterpretq_f32_u16(vb2f.val[1]));
vacc1x2 = vfmaq_f32(vacc1x2, vreinterpretq_f32_u16(va1x2f.val[1]), vreinterpretq_f32_u16(vb2f.val[1]));
vacc2x2 = vfmaq_f32(vacc2x2, vreinterpretq_f32_u16(va2x2f.val[1]), vreinterpretq_f32_u16(vb2f.val[1]));
vacc3x2 = vfmaq_f32(vacc3x2, vreinterpretq_f32_u16(va3x2f.val[1]), vreinterpretq_f32_u16(vb2f.val[1]));
vacc0x3 = vfmaq_f32(vacc0x3, vreinterpretq_f32_u16(va0x3f.val[1]), vreinterpretq_f32_u16(vb3f.val[1]));
vacc1x3 = vfmaq_f32(vacc1x3, vreinterpretq_f32_u16(va1x3f.val[1]), vreinterpretq_f32_u16(vb3f.val[1]));
vacc2x3 = vfmaq_f32(vacc2x3, vreinterpretq_f32_u16(va2x3f.val[1]), vreinterpretq_f32_u16(vb3f.val[1]));
vacc3x3 = vfmaq_f32(vacc3x3, vreinterpretq_f32_u16(va3x3f.val[1]), vreinterpretq_f32_u16(vb3f.val[1]));
}
#if XNN_ARCH_ARM64
const float32x4_t vacc0x01 = vpaddq_f32(vacc0x0, vacc0x1);
const float32x4_t vacc1x01 = vpaddq_f32(vacc1x0, vacc1x1);
const float32x4_t vacc2x01 = vpaddq_f32(vacc2x0, vacc2x1);
const float32x4_t vacc3x01 = vpaddq_f32(vacc3x0, vacc3x1);
const float32x4_t vacc0x23 = vpaddq_f32(vacc0x2, vacc0x3);
const float32x4_t vacc1x23 = vpaddq_f32(vacc1x2, vacc1x3);
const float32x4_t vacc2x23 = vpaddq_f32(vacc2x2, vacc2x3);
const float32x4_t vacc3x23 = vpaddq_f32(vacc3x2, vacc3x3);
float32x4_t vacc0x0123 = vpaddq_f32(vacc0x01, vacc0x23);
float32x4_t vacc1x0123 = vpaddq_f32(vacc1x01, vacc1x23);
float32x4_t vacc2x0123 = vpaddq_f32(vacc2x01, vacc2x23);
float32x4_t vacc3x0123 = vpaddq_f32(vacc3x01, vacc3x23);
#else
const float32x2_t vsum0x0 = vadd_f32(vget_low_f32(vacc0x0), vget_high_f32(vacc0x0));
const float32x2_t vsum1x0 = vadd_f32(vget_low_f32(vacc1x0), vget_high_f32(vacc1x0));
const float32x2_t vsum2x0 = vadd_f32(vget_low_f32(vacc2x0), vget_high_f32(vacc2x0));
const float32x2_t vsum3x0 = vadd_f32(vget_low_f32(vacc3x0), vget_high_f32(vacc3x0));
const float32x2_t vsum0x1 = vadd_f32(vget_low_f32(vacc0x1), vget_high_f32(vacc0x1));
const float32x2_t vsum1x1 = vadd_f32(vget_low_f32(vacc1x1), vget_high_f32(vacc1x1));
const float32x2_t vsum2x1 = vadd_f32(vget_low_f32(vacc2x1), vget_high_f32(vacc2x1));
const float32x2_t vsum3x1 = vadd_f32(vget_low_f32(vacc3x1), vget_high_f32(vacc3x1));
const float32x2_t vsum0x2 = vadd_f32(vget_low_f32(vacc0x2), vget_high_f32(vacc0x2));
const float32x2_t vsum1x2 = vadd_f32(vget_low_f32(vacc1x2), vget_high_f32(vacc1x2));
const float32x2_t vsum2x2 = vadd_f32(vget_low_f32(vacc2x2), vget_high_f32(vacc2x2));
const float32x2_t vsum3x2 = vadd_f32(vget_low_f32(vacc3x2), vget_high_f32(vacc3x2));
const float32x2_t vsum0x3 = vadd_f32(vget_low_f32(vacc0x3), vget_high_f32(vacc0x3));
const float32x2_t vsum1x3 = vadd_f32(vget_low_f32(vacc1x3), vget_high_f32(vacc1x3));
const float32x2_t vsum2x3 = vadd_f32(vget_low_f32(vacc2x3), vget_high_f32(vacc2x3));
const float32x2_t vsum3x3 = vadd_f32(vget_low_f32(vacc3x3), vget_high_f32(vacc3x3));
float32x4_t vacc0x0123 = vcombine_f32(vpadd_f32(vsum0x0, vsum0x1), vpadd_f32(vsum0x2, vsum0x3));
float32x4_t vacc1x0123 = vcombine_f32(vpadd_f32(vsum1x0, vsum1x1), vpadd_f32(vsum1x2, vsum1x3));
float32x4_t vacc2x0123 = vcombine_f32(vpadd_f32(vsum2x0, vsum2x1), vpadd_f32(vsum2x2, vsum2x3));
float32x4_t vacc3x0123 = vcombine_f32(vpadd_f32(vsum3x0, vsum3x1), vpadd_f32(vsum3x2, vsum3x3));
#endif
const float32x4_t vmax = vld1q_dup_f32(¶ms->scalar.max);
vacc0x0123 = vminq_f32(vacc0x0123, vmax);
vacc1x0123 = vminq_f32(vacc1x0123, vmax);
vacc2x0123 = vminq_f32(vacc2x0123, vmax);
vacc3x0123 = vminq_f32(vacc3x0123, vmax);
const float32x4_t vmin = vld1q_dup_f32(¶ms->scalar.min);
vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
uint16x4_t vout0x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc0x0123), 16);
uint16x4_t vout1x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc1x0123), 16);
uint16x4_t vout2x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc2x0123), 16);
uint16x4_t vout3x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc3x0123), 16);
if XNN_LIKELY(nc >= 4) {
vst1_u16(c0, vout0x0123);
c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);
vst1_u16(c1, vout1x0123);
c1 = (uint16_t*) ((uintptr_t) c1 + cn_stride);
vst1_u16(c2, vout2x0123);
c2 = (uint16_t*) ((uintptr_t) c2 + cn_stride);
vst1_u16(c3, vout3x0123);
c3 = (uint16_t*) ((uintptr_t) c3 + cn_stride);
a0 = (const uint16_t*) ((uintptr_t) a0 - kc);
a1 = (const uint16_t*) ((uintptr_t) a1 - kc);
a2 = (const uint16_t*) ((uintptr_t) a2 - kc);
a3 = (const uint16_t*) ((uintptr_t) a3 - kc);
nc -= 4;
} else {
if (nc & 2) {
vst1_lane_u32((void*) c0, vreinterpret_u32_u16(vout0x0123), 0); c0 += 2;
vst1_lane_u32((void*) c1, vreinterpret_u32_u16(vout1x0123), 0); c1 += 2;
vst1_lane_u32((void*) c2, vreinterpret_u32_u16(vout2x0123), 0); c2 += 2;
vst1_lane_u32((void*) c3, vreinterpret_u32_u16(vout3x0123), 0); c3 += 2;
vout0x0123 = vext_u16(vout0x0123, vout0x0123, 2);
vout1x0123 = vext_u16(vout1x0123, vout1x0123, 2);
vout2x0123 = vext_u16(vout2x0123, vout2x0123, 2);
vout3x0123 = vext_u16(vout3x0123, vout3x0123, 2);
}
if (nc & 1) {
vst1_lane_u16(c0, vout0x0123, 0);
vst1_lane_u16(c1, vout1x0123, 0);
vst1_lane_u16(c2, vout2x0123, 0);
vst1_lane_u16(c3, vout3x0123, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 18,209 | 55.90625 | 109 | c |